diff --git pom.xml pom.xml
index 1136cbe..7c25884 100644
--- pom.xml
+++ pom.xml
@@ -503,6 +503,22 @@
+            <pluginExecution>
+              <pluginExecutionFilter>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>
+                  build-helper-maven-plugin
+                </artifactId>
+                <versionRange>[1.5,)</versionRange>
+                <goals>
+                  <goal>add-source</goal>
+                  <goal>add-test-resource</goal>
+                </goals>
+              </pluginExecutionFilter>
+              <action>
+                <ignore></ignore>
+              </action>
+            </pluginExecution>
diff --git src/main/java/org/apache/hadoop/hbase/HRegionInfo.java src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 0ee74e7..1ab4038 100644
--- src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -563,7 +563,7 @@ implements WritableComparable<HRegionInfo> {
       throw new RuntimeException(e);
     }
     FSTableDescriptors fstd =
-      new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR)));
+      new FSTableDescriptors(c, fs, new Path(c.get(HConstants.HBASE_DIR)));
     try {
       return fstd.get(this.tableName);
     } catch (IOException e) {
@@ -585,7 +585,7 @@ implements WritableComparable<HRegionInfo> {
       throw new RuntimeException(e);
     }
     FSTableDescriptors fstd =
-      new FSTableDescriptors(fs, new Path(c.get(HConstants.HBASE_DIR)));
+      new FSTableDescriptors(c, fs, new Path(c.get(HConstants.HBASE_DIR)));
     try {
       fstd.add(newDesc);
     } catch (IOException e) {
diff --git src/main/java/org/apache/hadoop/hbase/master/HMaster.java src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c680630..b91ab15 100644
--- src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -513,7 +513,7 @@ Server {
     this.fileSystemManager = new MasterFileSystem(this, this, metrics, masterRecovery);

     this.tableDescriptors =
-      new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
+      new FSTableDescriptors(this.conf, this.fileSystemManager.getFileSystem(),
       this.fileSystemManager.getRootDir());

     // publish cluster ID
diff --git src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 694ba0d..8e3f3ba 100644
--- src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -377,10 +377,11 @@ public class MasterFileSystem {
   }

   private void createRootTableInfo(Path rd) throws IOException {
+    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rd);
     // Create ROOT tableInfo if required.
-    if (!FSTableDescriptors.isTableInfoExists(fs, rd,
+    if (!fstd.isTableInfoExists(
         Bytes.toString(HRegionInfo.ROOT_REGIONINFO.getTableName()))) {
-      FSTableDescriptors.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC, this.conf);
+      fstd.createTableDescriptor(HTableDescriptor.ROOT_TABLEDESC);
     }
   }

@@ -486,7 +487,7 @@ public class MasterFileSystem {
    */
   public void createTableDescriptor(HTableDescriptor htableDescriptor)
       throws IOException {
-    FSTableDescriptors.createTableDescriptor(htableDescriptor, conf);
+    new FSTableDescriptors(conf).createTableDescriptor(htableDescriptor);
   }

   /**
diff --git src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
index af25def..dd3c9d9 100644
--- src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
+++ src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
@@ -137,7 +137,7 @@ public class CreateTableHandler extends EventHandler {
     // tableDir is created.  Should we change below method to be createTable
     // where we create table in tmp dir with its table descriptor file and then
     // do rename to move it into place?
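Taken together, this and the preceding call sites follow one migration pattern: the static FSTableDescriptors helpers become instance methods, with Configuration, FileSystem and root directory captured once at construction. A minimal usage sketch, assuming only the constructors and methods introduced by this patch (class and table names here are illustrative):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class FstdUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    Path rootdir = fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));

    // Old style: FSTableDescriptors.createTableDescriptor(htd, conf);
    // New style: state is captured once, calls carry only the descriptor.
    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootdir);

    HTableDescriptor htd = new HTableDescriptor("example_table");
    fstd.createTableDescriptor(htd);                    // writes the .tableinfo file
    HTableDescriptor read = fstd.get("example_table");  // cached read path
    System.out.println(htd.equals(read));
  }
}
```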
- FSTableDescriptors.createTableDescriptor(this.hTableDescriptor, this.conf); + new FSTableDescriptors(this.conf).createTableDescriptor(this.hTableDescriptor); List regionInfos = new ArrayList(); final int batchSize = diff --git src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index e123c22..8c0ae0c 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -53,6 +53,7 @@ import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.HRegionInfo; @@ -91,6 +92,7 @@ public class CompactionTool extends Configured implements Tool { private final Configuration conf; private final FileSystem fs; private final Path tmpDir; + private final FSTableDescriptors fstd; public CompactionWorker(final FileSystem fs, final Configuration conf) { this.conf = conf; @@ -98,6 +100,7 @@ public class CompactionTool extends Configured implements Tool { this.deleteCompacted = conf.getBoolean(CONF_DELETE_COMPACTED, false); this.tmpDir = new Path(conf.get(CONF_TMP_DIR)); this.fs = fs; + this.fstd = getFSTableDescriptors(conf, fs); } /** @@ -110,14 +113,14 @@ public class CompactionTool extends Configured implements Tool { if (isFamilyDir(fs, path)) { Path regionDir = path.getParent(); Path tableDir = regionDir.getParent(); - HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir); + HTableDescriptor htd = fstd.getTableDescriptorFromFs(tableDir); HRegion region = loadRegion(fs, conf, htd, regionDir); compactStoreFiles(region, path, compactOnce); } else if (isRegionDir(fs, path)) { Path tableDir = path.getParent(); - HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir); + HTableDescriptor htd = fstd.getTableDescriptorFromFs(tableDir); compactRegion(htd, path, compactOnce); - } else if (isTableDir(fs, path)) { + } else if (isTableDir(fstd, path)) { compactTable(path, compactOnce); } else { throw new IOException( @@ -127,7 +130,7 @@ public class CompactionTool extends Configured implements Tool { private void compactTable(final Path tableDir, final boolean compactOnce) throws IOException { - HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(fs, tableDir); + HTableDescriptor htd = fstd.getTableDescriptorFromFs(tableDir); LOG.info("Compact table=" + htd.getNameAsString()); for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { compactRegion(htd, regionDir, compactOnce); @@ -201,8 +204,8 @@ public class CompactionTool extends Configured implements Tool { return fs.exists(regionInfo); } - private static boolean isTableDir(final FileSystem fs, final Path path) throws IOException { - return FSTableDescriptors.getTableInfoPath(fs, path) != null; + private static boolean isTableDir(final FSTableDescriptors fstd, final Path path) throws IOException { + return fstd.getTableInfoPath(path) != null; } private static boolean isFamilyDir(final FileSystem fs, final Path path) throws IOException { @@ -300,8 +303,9 @@ public class CompactionTool extends Configured implements Tool { * The file is a TextFile with each line corrisponding to a * store files directory to compact. 
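The input-file format described here is simple enough to show concretely. A sketch (not the tool's actual code) that writes one store directory per line, the plain-text layout createInputFile produces for the map-reduce job:

```java
import java.io.BufferedWriter;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class CompactionInputSketch {
  /** Writes each store directory on its own line, e.g. /hbase/t1/<region>/<family>. */
  static void writeStoreDirs(FileSystem fs, Path inputPath, List<Path> storeDirs)
      throws IOException {
    BufferedWriter writer = new BufferedWriter(
        new OutputStreamWriter(fs.create(inputPath, true), "UTF-8"));
    try {
      for (Path storeDir : storeDirs) {
        writer.write(storeDir.toString());
        writer.newLine();
      }
    } finally {
      writer.close();
    }
  }
}
```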
*/ - public static void createInputFile(final FileSystem fs, final Path path, - final Set toCompactDirs) throws IOException { + public static void createInputFile(final Configuration conf, final FileSystem fs, + final Path path, final Set toCompactDirs) throws IOException { + FSTableDescriptors fstd = getFSTableDescriptors(conf, fs); // Extract the list of store dirs List storeDirs = new LinkedList(); for (Path compactDir: toCompactDirs) { @@ -311,7 +315,7 @@ public class CompactionTool extends Configured implements Tool { for (Path familyDir: FSUtils.getFamilyDirs(fs, compactDir)) { storeDirs.add(familyDir); } - } else if (isTableDir(fs, compactDir)) { + } else if (isTableDir(fstd, compactDir)) { // Lookup regions for (Path regionDir: FSUtils.getRegionDirs(fs, compactDir)) { for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { @@ -339,6 +343,13 @@ public class CompactionTool extends Configured implements Tool { } } + private static FSTableDescriptors getFSTableDescriptors(final Configuration conf, + final FileSystem fs) + { + Path hbaseRootDir = new Path(conf.get(HConstants.HBASE_DIR)).makeQualified(fs); + return new FSTableDescriptors(conf, fs, hbaseRootDir); + } + /** * Execute compaction, using a Map-Reduce job. */ @@ -362,7 +373,7 @@ public class CompactionTool extends Configured implements Tool { try { // Create input file with the store dirs Path inputPath = new Path(stagingDir, stagingName); - CompactionInputFormat.createInputFile(fs, inputPath, toCompactDirs); + CompactionInputFormat.createInputFile(conf, fs, inputPath, toCompactDirs); CompactionInputFormat.addInputPath(job, inputPath); // Initialize credential for secure cluster diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 86d1337..1952fbb 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1034,7 +1034,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // Get fs instance used by this RS this.fs = new HFileSystem(this.conf, this.useHBaseChecksum); this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); - this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true); + this.tableDescriptors = new FSTableDescriptors(this.conf, this.fs, this.rootDir, true); this.hlog = setupWALAndReplication(); // Init in here rather than in constructor after thread name has been set this.metrics = new RegionServerMetrics(); diff --git src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 5141121..22b5836 100644 --- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.util; import java.io.FileNotFoundException; import java.io.IOException; -import java.util.Arrays; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -43,6 +42,8 @@ import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.TableInfoMissingException; +import com.google.common.annotations.VisibleForTesting; + /** * Implementation of {@link TableDescriptors} that reads descriptors from the @@ -52,7 +53,7 @@ import org.apache.hadoop.hbase.TableInfoMissingException; * *

Also has utility for keeping up the table descriptors tableinfo file. * The table schema file is kept under the table directory in the filesystem. - * It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the + * It has a {@link #TABLEINFO_FILE_PREFIX} and then a suffix that is the * edit sequenceid: e.g. .tableinfo.0000000003. This sequenceid * is always increasing. It starts at zero. The table schema file with the * highest sequenceid has the most recent schema edit. Usually there is one file @@ -63,29 +64,33 @@ import org.apache.hadoop.hbase.TableInfoMissingException; */ public class FSTableDescriptors implements TableDescriptors { private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class); + private final Configuration conf; private final FileSystem fs; private final Path rootdir; private final boolean fsreadonly; - long cachehits = 0; - long invocations = 0; + @VisibleForTesting long cachehits = 0; + @VisibleForTesting long invocations = 0; - /** The file name used to store HTD in HDFS */ - public static final String TABLEINFO_NAME = ".tableinfo"; + /** The file name prefix used to store HTD in HDFS */ + static final String TABLEINFO_FILE_PREFIX = ".tableinfo"; + static final String TABLEINFO_DIR = ".tabledesc"; + static final String TMP_DIR = ".tmp"; + static final String LOCKFILE = ".lock"; // This cache does not age out the old stuff. Thinking is that the amount // of data we keep up in here is so small, no need to do occasional purge. // TODO. - private final Map cache = - new ConcurrentHashMap(); + private final Map cache = + new ConcurrentHashMap(); /** * Data structure to hold modification time and table descriptor. */ - static class TableDescriptorModtime { + private static class TableDescriptorAndModtime { private final HTableDescriptor descriptor; private final long modtime; - TableDescriptorModtime(final long modtime, final HTableDescriptor htd) { + TableDescriptorAndModtime(final long modtime, final HTableDescriptor htd) { this.descriptor = htd; this.modtime = modtime; } @@ -99,26 +104,37 @@ public class FSTableDescriptors implements TableDescriptors { } } - public FSTableDescriptors(final FileSystem fs, final Path rootdir) { - this(fs, rootdir, false); + /** + * Construct a FSTableDescriptors instance using the hbase root dir of the given + * conf and the filesystem where that root dir lives. + * This instance can do write operations (is not read only). + */ + public FSTableDescriptors(final Configuration conf) throws IOException { + this(conf, FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf)); + } + + public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir) { + this(conf, fs, rootdir, false); } /** - * @param fs - * @param rootdir - * @param fsreadOnly True if we are read-only when it comes to filesystem + * @param fsreadonly True if we are read-only when it comes to filesystem * operations; i.e. on remove, we do not do delete in fs. */ - public FSTableDescriptors(final FileSystem fs, final Path rootdir, - final boolean fsreadOnly) { + public FSTableDescriptors(final Configuration conf, final FileSystem fs, + final Path rootdir, final boolean fsreadonly) { super(); + this.conf = conf; this.fs = fs; this.rootdir = rootdir; - this.fsreadonly = fsreadOnly; + this.fsreadonly = fsreadonly; } - /* (non-Javadoc) - * @see org.apache.hadoop.hbase.TableDescriptors#getHTableDescriptor(java.lang.String) + /** + * Get the current table descriptor for the given table, or null if none exists. 
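The constants introduced below imply a concrete on-disk layout. A small sketch of the paths they produce; the helper names are hypothetical, and String.format("%010d", ...) matches the 10-digit zero-padded sequence id described above:

```java
import org.apache.hadoop.fs.Path;

final class TableInfoLayoutSketch {
  // Mirror the constants defined in FSTableDescriptors.
  static final String TABLEINFO_FILE_PREFIX = ".tableinfo";
  static final String TABLEINFO_DIR = ".tabledesc";
  static final String TMP_DIR = ".tmp";
  static final String LOCKFILE = ".lock";

  /** New location: <tableDir>/.tabledesc/.tableinfo.0000000003 */
  static Path currentTableInfo(Path tableDir, int sequenceId) {
    return new Path(new Path(tableDir, TABLEINFO_DIR),
        TABLEINFO_FILE_PREFIX + "." + String.format("%010d", sequenceId));
  }

  /** Legacy location, directly under the table directory. */
  static Path legacyTableInfo(Path tableDir, int sequenceId) {
    return new Path(tableDir,
        TABLEINFO_FILE_PREFIX + "." + String.format("%010d", sequenceId));
  }

  /** Writes are staged here, then renamed into place. */
  static Path stagingDir(Path tableDir) {
    return new Path(tableDir, TMP_DIR);
  }
}
```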
+ * + * Uses a local cache of the descriptor but still checks the filesystem on each call + * to see if a newer file has been created since the cached one was read. */ @Override public HTableDescriptor get(final byte [] tablename) @@ -126,8 +142,11 @@ public class FSTableDescriptors implements TableDescriptors { return get(Bytes.toString(tablename)); } - /* (non-Javadoc) - * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptor(byte[]) + /** + * Get the current table descriptor for the given table, or null if none exists. + * + * Uses a local cache of the descriptor but still checks the filesystem on each call + * to see if a newer file has been created since the cached one was read. */ @Override public HTableDescriptor get(final String tablename) @@ -144,23 +163,23 @@ public class FSTableDescriptors implements TableDescriptors { // .META. and -ROOT- is already handled. If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename)) { - throw new IOException("No descriptor found for table = " + tablename); + throw new IOException("No descriptor found for non table = " + tablename); } // Look in cache of descriptors. - TableDescriptorModtime cachedtdm = this.cache.get(tablename); + TableDescriptorAndModtime cachedtdm = this.cache.get(tablename); if (cachedtdm != null) { // Check mod time has not changed (this is trip to NN). - if (getTableInfoModtime(this.fs, this.rootdir, tablename) <= cachedtdm.getModtime()) { + if (getTableInfoModtime(tablename) <= cachedtdm.getModtime()) { cachehits++; return cachedtdm.getTableDescriptor(); } } - TableDescriptorModtime tdmt = null; + TableDescriptorAndModtime tdmt = null; try { - tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename); + tdmt = getTableDescriptorAndModtime(tablename); } catch (NullPointerException e) { LOG.debug("Exception during readTableDecriptor. Current table name = " + tablename, e); @@ -179,8 +198,8 @@ public class FSTableDescriptors implements TableDescriptors { return tdmt == null ? null : tdmt.getTableDescriptor(); } - /* (non-Javadoc) - * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path) + /** + * Returns a map from table name to table descriptor for all tables. */ @Override public Map getAll() @@ -202,8 +221,15 @@ public class FSTableDescriptors implements TableDescriptors { return htds; } + /** + * Adds (or updates) the table descriptor to the FileSystem + * and updates the local cache with it. 
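The cache-validation logic in get() above boils down to one reusable shape: keep the value together with the modification time observed when it was read, and serve from cache only while a fresh modtime probe does not exceed it. A generic sketch of that pattern, not the class's actual code:

```java
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

abstract class ModtimeValidatedCache<V> {
  private static final class Entry<V> {
    final long modtime;
    final V value;
    Entry(long modtime, V value) {
      this.modtime = modtime;
      this.value = value;
    }
  }

  private final Map<String, Entry<V>> cache =
      new ConcurrentHashMap<String, Entry<V>>();

  /** One NameNode round trip: modtime of the backing file, or 0 if absent. */
  protected abstract long modtime(String key) throws IOException;

  /** Full read of the backing file. */
  protected abstract V load(String key) throws IOException;

  public V get(String key) throws IOException {
    Entry<V> cached = cache.get(key);
    // A cache hit still costs a modtime probe, but not a full read.
    if (cached != null && modtime(key) <= cached.modtime) {
      return cached.value;
    }
    long observed = modtime(key);
    V value = load(key);
    cache.put(key, new Entry<V>(observed, value));
    return value;
  }
}
```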
+ */ @Override public void add(HTableDescriptor htd) throws IOException { + if (fsreadonly) { + throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); + } if (Bytes.equals(HConstants.ROOT_TABLE_NAME, htd.getName())) { throw new NotImplementedException(); } @@ -211,30 +237,36 @@ public class FSTableDescriptors implements TableDescriptors { throw new NotImplementedException(); } if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) { - throw new NotImplementedException(); + throw new NotImplementedException("Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString()); } - if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd); - long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString()); - this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd)); + updateTableDescriptor(htd); + long modtime = getTableInfoModtime(htd.getNameAsString()); + this.cache.put(htd.getNameAsString(), new TableDescriptorAndModtime(modtime, htd)); } + /** + * Removes the table descriptor from the local cache and returns it. + * If not in read only mode, it also deletes the entire table directory(!) + * from the FileSystem. + */ @Override public HTableDescriptor remove(final String tablename) throws IOException { - if (!this.fsreadonly) { - Path tabledir = FSUtils.getTablePath(this.rootdir, tablename); - if (this.fs.exists(tabledir)) { - if (!this.fs.delete(tabledir, true)) { - throw new IOException("Failed delete of " + tabledir.toString()); - } + if (fsreadonly) { + throw new NotImplementedException("Cannot remove a table descriptor - in read only mode"); + } + Path tabledir = getTableDirectory(tablename); + if (this.fs.exists(tabledir)) { + if (!this.fs.delete(tabledir, true)) { + throw new IOException("Failed delete of " + tabledir.toString()); } } - TableDescriptorModtime tdm = this.cache.remove(tablename); + TableDescriptorAndModtime tdm = this.cache.remove(tablename); return tdm == null ? null : tdm.getTableDescriptor(); } /** - * Checks if .tableinfo exists for given table + * Checks if a current table info file exists for the given table * * @param fs file system * @param rootdir root directory of HBase installation @@ -242,77 +274,139 @@ public class FSTableDescriptors implements TableDescriptors { * @return true if exists * @throws IOException */ - public static boolean isTableInfoExists(FileSystem fs, Path rootdir, - String tableName) throws IOException { - FileStatus status = getTableInfoPath(fs, rootdir, tableName); - return status == null? false: fs.exists(status.getPath()); + public boolean isTableInfoExists(String tableName) throws IOException { + return getTableInfoPath(tableName) != null; } - - private static FileStatus getTableInfoPath(final FileSystem fs, - final Path rootdir, final String tableName) - throws IOException { - Path tabledir = FSUtils.getTablePath(rootdir, tableName); - return getTableInfoPath(fs, tabledir); + + /** + * Return the table directory in HDFS + */ + private Path getTableDirectory(final String tableName) { + return FSUtils.getTablePath(rootdir, tableName); + } + + /** + * Find the most current table info file for the given table in the hbase root directory. 
+ * @return The file status of the current table info file or null if it does not exist + */ + private FileStatus getTableInfoPath(final String tableName) throws IOException { + Path tableDir = getTableDirectory(tableName); + return getTableInfoPath(tableDir); } /** - * Looks under the table directory in the filesystem for files with a - * {@link #TABLEINFO_NAME} prefix. Returns reference to the 'latest' instance. + * Find the most current table info file for the table in the given directory. + * Checks first within the {@link TABLEINFO_DIR} subdirectory, then + * falls back to the table directory. + * + * If we're not in read only mode and the {@link TABLEINFO_DIR} subdirectory is not present + * then it creates it. * @param fs - * @param tabledir - * @return The 'current' tableinfo file. + * @param tableDir + * @return The file status of the current table info file or null if it does not exist * @throws IOException */ - public static FileStatus getTableInfoPath(final FileSystem fs, - final Path tabledir) + public FileStatus getTableInfoPath(Path tableDir) throws IOException { - FileStatus [] status = FSUtils.listStatus(fs, tabledir, new PathFilter() { - @Override - public boolean accept(Path p) { - // Accept any file that starts with TABLEINFO_NAME - return p.getName().startsWith(TABLEINFO_NAME); + // First check for file in TABLEINFO_DIR subdirectory + Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); + FileStatus tableInfoStatus = getCurrentTableInfoFileStatus(tableInfoDir, true); + if (tableInfoStatus == null) { + // if it's not in the TABLEINFO_DIR subdirectory, fall back and check + // the older location - directory in the tabledir + tableInfoStatus = getCurrentTableInfoFileStatus(tableDir, false); + + if (!fsreadonly && tableInfoStatus != null) { + // cache it back in the TABLEINFO_DIR subdirectory for the next reader + Path tableInfoFile = tableInfoStatus.getPath(); + Path tmpTableDir = new Path(tableDir, TMP_DIR); + Path tempPath = new Path(tmpTableDir, tableInfoFile.getName()); + Path destPath = new Path(tableInfoDir, tableInfoFile.getName()); + HTableDescriptor hTableDescriptor = readTableDescriptor(tableInfoFile); + try { + writeThenRenameIntoPlace(hTableDescriptor, tempPath, destPath, true); + } catch (IOException ioe) { + // may be race condition on multiple readers + LOG.info("Failed to cache descriptor, may be a race: " + ioe); + } } - }); + } + return tableInfoStatus; + } + + private static final PathFilter TABLEINFO_PATHFILTER = new PathFilter() { + @Override + public boolean accept(Path p) { + // Accept any file that starts with TABLEINFO_NAME + return p.getName().startsWith(TABLEINFO_FILE_PREFIX) || LOCKFILE.equals(p.getName()); + }}; + + /** + * Looks within the given directory for any table info files + * and takes the 'current' one - meaning the one with the highest sequence number if present + * or no sequence number at all if none exist (for backward compatibility from before there + * were sequence numbers). + * If there are multiple possible files found + * and the we're not in read only mode it also deletes the older files. 
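The fallback-and-migrate behavior in getTableInfoPath above generalizes to: prefer the new location, fall back to the legacy one, and opportunistically copy forward when writable, treating a lost race with another migrating reader as harmless. A sketch under those assumptions:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;

final class PreferredLocationSketch {
  static FileStatus resolve(FileSystem fs, Path newLocation, Path oldLocation,
      boolean readOnly) throws IOException {
    if (fs.exists(newLocation)) {
      return fs.getFileStatus(newLocation);
    }
    if (!fs.exists(oldLocation)) {
      return null; // no descriptor at all
    }
    if (!readOnly) {
      try {
        // Copy forward so the next reader finds the new location first.
        fs.mkdirs(newLocation.getParent());
        FileUtil.copy(fs, oldLocation, fs, newLocation, false, fs.getConf());
      } catch (IOException ioe) {
        // Possible race with another migrating reader; losing it is harmless.
      }
    }
    return fs.getFileStatus(oldLocation);
  }
}
```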
+ * @return The FileStatus for the current tableInfo file, if one exists in the given directory + */ + private FileStatus getCurrentTableInfoFileStatus(final Path dir, boolean checkForLockFile) + throws IOException { + FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER); if (status == null || status.length < 1) return null; - Arrays.sort(status, new FileStatusFileNameComparator()); - if (status.length > 1) { - // Clean away old versions of .tableinfo - for (int i = 1; i < status.length; i++) { - Path p = status[i].getPath(); - // Clean up old versions - if (!fs.delete(p, false)) { - LOG.warn("Failed cleanup of " + status); - } else { - LOG.debug("Cleaned up old tableinfo file " + p); + FileStatus mostCurrent = null; + for (FileStatus file : status) { + if (LOCKFILE.equals(file.getPath().getName())) { + if (checkForLockFile) { + LOG.warn("Lock file present at: " + file.getPath() + " - will read descriptor from table directory instead." + + LOCK_WARNING); + return null; + } + } else { + if (mostCurrent == null + || TABLEINFO_FILESTATUS_COMPARATOR.compare(file, mostCurrent) < 0) { + mostCurrent = file; } } } - return status[0]; + if (!fsreadonly && status.length > 1) { + // Clean away old versions + for (FileStatus file : status) { + Path path = file.getPath(); + if (file != mostCurrent && !LOCKFILE.equals(path.getName())) { + if (!fs.delete(file.getPath(), false)) { + LOG.warn("Failed cleanup of " + path); + } else { + LOG.debug("Cleaned up old tableinfo file " + path); + } + } + } + } + return mostCurrent; } /** * Compare {@link FileStatus} instances by {@link Path#getName()}. * Returns in reverse order. */ - static class FileStatusFileNameComparator - implements Comparator { - @Override - public int compare(FileStatus left, FileStatus right) { - return -left.compareTo(right); - } - } + @VisibleForTesting static final Comparator TABLEINFO_FILESTATUS_COMPARATOR + = new Comparator() { + @Override + public int compare(FileStatus left, FileStatus right) { + return -left.compareTo(right); + }}; /** * Width of the sequenceid that is a suffix on a tableinfo file. */ - static final int WIDTH_OF_SEQUENCE_ID = 10; + @VisibleForTesting static final int WIDTH_OF_SEQUENCE_ID = 10; /* * @param number Number to use as suffix. - * @return Returns zero-prefixed 5-byte wide decimal version of passed + * @return Returns zero-prefixed decimal version of passed * number (Does absolute in case number is negative). */ - static String formatTableInfoSequenceId(final int number) { + private static String formatTableInfoSequenceId(final int number) { byte [] b = new byte[WIDTH_OF_SEQUENCE_ID]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { @@ -327,17 +421,16 @@ public class FSTableDescriptors implements TableDescriptors { * Use regex because may encounter oldstyle .tableinfos where there is no * sequenceid on the end. */ - private static final Pattern SUFFIX = - Pattern.compile(TABLEINFO_NAME + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$"); - + private static final Pattern TABLEINFO_FILE_REGEX = + Pattern.compile(TABLEINFO_FILE_PREFIX + "(\\.([0-9]{" + WIDTH_OF_SEQUENCE_ID + "}))?$"); /** * @param p Path to a .tableinfo file. * @return The current editid or 0 if none found. 
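The sequence-id suffix handling is self-contained enough to demonstrate in isolation. This round-trip sketch mirrors the width constant and regex above; parse() stands in for getTableInfoSequenceId:

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class TableInfoSeqIdSketch {
  static final int WIDTH = 10; // WIDTH_OF_SEQUENCE_ID
  static final Pattern SUFFIX =
      Pattern.compile("\\.tableinfo(\\.([0-9]{" + WIDTH + "}))?$");

  static int parse(String fileName) {
    Matcher m = SUFFIX.matcher(fileName);
    if (!m.matches()) throw new IllegalArgumentException(fileName);
    String suffix = m.group(2);
    // Old-style files carry no suffix; they count as sequence id 0.
    return suffix == null ? 0 : Integer.parseInt(suffix);
  }

  public static void main(String[] args) {
    System.out.println(parse(".tableinfo"));            // 0 (old style)
    System.out.println(parse(".tableinfo.0000000003")); // 3
  }
}
```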
*/ - static int getTableInfoSequenceid(final Path p) { + @VisibleForTesting static int getTableInfoSequenceId(final Path p) { if (p == null) return 0; - Matcher m = SUFFIX.matcher(p.getName()); + Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName()); if (!m.matches()) throw new IllegalArgumentException(p.toString()); String suffix = m.group(2); if (suffix == null || suffix.length() <= 0) return 0; @@ -349,74 +442,55 @@ public class FSTableDescriptors implements TableDescriptors { * @param sequenceid * @return Name of tableinfo file. */ - static Path getTableInfoFileName(final Path tabledir, final int sequenceid) { - return new Path(tabledir, - TABLEINFO_NAME + "." + formatTableInfoSequenceId(sequenceid)); + @VisibleForTesting static String getTableInfoFileName(final int sequenceid) { + return TABLEINFO_FILE_PREFIX + "." + formatTableInfoSequenceId(sequenceid); } /** * @param fs * @param rootdir * @param tableName - * @return Modification time for the table {@link #TABLEINFO_NAME} file + * @return Modification time for the table {@link #TABLEINFO_FILE_PREFIX} file * or 0 if no tableinfo file found. * @throws IOException */ - static long getTableInfoModtime(final FileSystem fs, final Path rootdir, - final String tableName) - throws IOException { - FileStatus status = getTableInfoPath(fs, rootdir, tableName); - return status == null? 0: status.getModificationTime(); + private long getTableInfoModtime(final String tableName) throws IOException { + FileStatus status = getTableInfoPath(tableName); + return status == null ? 0 : status.getModificationTime(); } /** - * Get HTD from HDFS. - * @param fs - * @param hbaseRootDir - * @param tableName - * @return Descriptor or null if none found. - * @throws IOException + * Returns the latest table descriptor for the given table directly from the file system + * if it exists, bypassing the local cache. + * Returns null if it's not found. */ - public static HTableDescriptor getTableDescriptor(FileSystem fs, - Path hbaseRootDir, byte[] tableName) - throws IOException { - HTableDescriptor htd = null; - try { - TableDescriptorModtime tdmt = - getTableDescriptorModtime(fs, hbaseRootDir, Bytes.toString(tableName)); - htd = tdmt == null ? null : tdmt.getTableDescriptor(); - } catch (NullPointerException e) { - LOG.debug("Exception during readTableDecriptor. Current table name = " - + Bytes.toString(tableName), e); - } - return htd; - } - - static HTableDescriptor getTableDescriptor(FileSystem fs, - Path hbaseRootDir, String tableName) throws NullPointerException, IOException { - TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, hbaseRootDir, tableName); + public HTableDescriptor getTableDescriptorFromFs(String tableName) throws IOException { + TableDescriptorAndModtime tdmt = getTableDescriptorAndModtime(tableName); return tdmt == null ? null : tdmt.getTableDescriptor(); } - static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, - Path hbaseRootDir, String tableName) throws NullPointerException, IOException{ + private TableDescriptorAndModtime getTableDescriptorAndModtime(String tableName) + throws IOException { // ignore both -ROOT- and .META. 
tables if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0 || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) { return null; } - return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName)); + return getTableDescriptorAndModtime(getTableDirectory(tableName)); } - static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, Path tableDir) - throws NullPointerException, IOException { - if (tableDir == null) throw new NullPointerException(); - FileStatus status = getTableInfoPath(fs, tableDir); + private TableDescriptorAndModtime getTableDescriptorAndModtime(Path tableDir) + throws IOException { + FileStatus status = getTableInfoPath(tableDir); if (status == null) { - throw new TableInfoMissingException("No .tableinfo file under " - + tableDir.toUri()); + throw new TableInfoMissingException("No .tableinfo file under " + tableDir.toUri()); } - FSDataInputStream fsDataInputStream = fs.open(status.getPath()); + HTableDescriptor hTableDescriptor = readTableDescriptor(status.getPath()); + return new TableDescriptorAndModtime(status.getModificationTime(), hTableDescriptor); + } + + private HTableDescriptor readTableDescriptor(Path descriptorFile) throws IOException { + FSDataInputStream fsDataInputStream = fs.open(descriptorFile); HTableDescriptor hTableDescriptor = null; try { hTableDescriptor = new HTableDescriptor(); @@ -424,111 +498,195 @@ public class FSTableDescriptors implements TableDescriptors { } finally { fsDataInputStream.close(); } - return new TableDescriptorModtime(status.getModificationTime(), hTableDescriptor); + return hTableDescriptor; } - - public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir) - throws IOException, NullPointerException { - TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, tableDir); + + /** + * Returns the latest table descriptor for the table at the given directory + * directly from the file system if it exists, bypassing the local cache. + * Returns null if it's not found. + */ + public HTableDescriptor getTableDescriptorFromFs(Path tableDir) throws IOException { + TableDescriptorAndModtime tdmt = getTableDescriptorAndModtime(tableDir); return tdmt == null? null: tdmt.getTableDescriptor(); } /** - * Update table descriptor - * @param fs - * @param conf - * @param hTableDescriptor - * @return New tableinfo or null if we failed update. + * Update table descriptor on the file system * @throws IOException Thrown if failed update. + * @throws NotImplementedException if in read only mode */ - static Path updateHTableDescriptor(FileSystem fs, Path rootdir, - HTableDescriptor hTableDescriptor) + @VisibleForTesting Path updateTableDescriptor(HTableDescriptor hTableDescriptor) throws IOException { - Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName()); - Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir, - getTableInfoPath(fs, tableDir)); + if (fsreadonly) { + throw new NotImplementedException("Cannot update a table descriptor - in read only mode"); + } + Path tableDir = getTableDirectory(hTableDescriptor.getNameAsString()); + Path p = writeTableDescriptor(hTableDescriptor, tableDir, getTableInfoPath(tableDir)); if (p == null) throw new IOException("Failed update"); LOG.info("Updated tableinfo=" + p); return p; } /** - * Deletes a table's directory from the file system if exists. Used in unit - * tests. + * Deletes all the table descriptor files from the file system. 
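readTableDescriptor above is the whole deserialization story in this era of HBase: descriptors are Hadoop Writables. A condensed sketch of that read path:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;

final class ReadDescriptorSketch {
  static HTableDescriptor read(FileSystem fs, Path tableInfoFile) throws IOException {
    FSDataInputStream in = fs.open(tableInfoFile);
    try {
      HTableDescriptor htd = new HTableDescriptor();
      htd.readFields(in); // Writable deserialization (pre-protobuf HBase)
      return htd;
    } finally {
      in.close(); // always close the NameNode-backed stream
    }
  }
}
```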
+ * Not robust in case of failures - Used in unit tests only! + * @throws NotImplementedException if in read only mode */ - public static void deleteTableDescriptorIfExists(String tableName, - Configuration conf) throws IOException { - FileSystem fs = FSUtils.getCurrentFileSystem(conf); - FileStatus status = getTableInfoPath(fs, FSUtils.getRootDir(conf), tableName); - // The below deleteDirectory works for either file or directory. - if (status != null && fs.exists(status.getPath())) { - FSUtils.deleteDirectory(fs, status.getPath()); + public void deleteTableDescriptorIfExists(String tableName) throws IOException { + if (fsreadonly) { + throw new NotImplementedException("Cannot delete a table descriptor - in read only mode"); + } + + Path tableDir = getTableDirectory(tableName); + Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); + Path lockFile = acquireLockFile(tableInfoDir); + try { + deleteTableDescriptorFiles(tableDir, Integer.MAX_VALUE); + deleteTableDescriptorFiles(tableInfoDir, Integer.MAX_VALUE); + } finally { + releaseLockFile(lockFile); } } /** - * @param fs - * @param hTableDescriptor - * @param tableDir - * @param status + * Deletes files matching the table info file pattern within the given directory + * whose sequenceId is at most the given max sequenceId. + */ + private void deleteTableDescriptorFiles(Path dir, int maxSequenceId) throws IOException { + FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER); + for (FileStatus file : status) { + Path path = file.getPath(); + if (!LOCKFILE.equals(path.getName())) { + int sequenceId = getTableInfoSequenceId(path); + if (sequenceId <= maxSequenceId) { + boolean success = FSUtils.delete(fs, path, false); + if (success) { + LOG.debug("Deleted table descriptor at " + path); + } else { + LOG.error("Failed to delete descriptor at " + path); + } + } + } + } + } + + /** + * Attempts to write a new table descriptor to the given table's directory. + * It first writes it to the .tmp dir then uses an atomic rename to move it into place. + * It begins at the currentSequenceId + 1 and tries 10 times to find a new sequence number + * not already in use. + * Removes the current descriptor file if passed in. + * * @return Descriptor file or null if we failed write. - * @throws IOException */ - private static Path writeTableDescriptor(final FileSystem fs, - final HTableDescriptor hTableDescriptor, final Path tableDir, - final FileStatus status) + private Path writeTableDescriptor(final HTableDescriptor hTableDescriptor, final Path tableDir, + final FileStatus currentDescriptorFile) + throws IOException { + Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); + Path lockFile = acquireLockFile(tableInfoDir); + try { + return doWriteTableDescriptor(hTableDescriptor, tableDir, currentDescriptorFile); + } finally { + releaseLockFile(lockFile); + } + } + + /** + * Returns the Path of the newly created file if successful, null otherwise. + */ + private Path doWriteTableDescriptor(final HTableDescriptor hTableDescriptor, final Path tableDir, + final FileStatus currentDescriptorFile) throws IOException { + // Get temporary dir into which we'll first write a file to avoid - // half-written file phenomeon. - Path tmpTableDir = new Path(tableDir, ".tmp"); + // half-written file phenomenon. This directory is never removed + // to avoid removing it out from under a concurrent writer. + Path tmpTableDir = new Path(tableDir, TMP_DIR); + Path tableInfoDir = new Path(tableDir, TABLEINFO_DIR); + // What is current sequenceid? 
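writeTableDescriptor above wraps the real work in acquire/release of a directory lock file. The shape, as a self-contained sketch with stand-in acquire/release helpers (the patch's own versions, with retries, appear further down):

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class LockedWriteSketch {
  static Path acquire(FileSystem fs, Path dir) throws IOException {
    Path lock = new Path(dir, ".lock");
    fs.create(lock, false).close(); // exclusive create: fails if already locked
    return lock;
  }

  static void release(FileSystem fs, Path lock) throws IOException {
    fs.delete(lock, false);
  }

  static void writeUnderLock(FileSystem fs, Path tableInfoDir, Runnable write)
      throws IOException {
    Path lock = acquire(fs, tableInfoDir);
    try {
      write.run(); // do the tmp-write + rename while holding the lock
    } finally {
      release(fs, lock); // always release, even on failure
    }
  }
}
```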
We read the current sequenceid from // the current file. After we read it, another thread could come in and // compete with us writing out next version of file. The below retries // should help in this case some but its hard to do guarantees in face of // concurrent schema edits. - int currentSequenceid = - status == null? 0: getTableInfoSequenceid(status.getPath()); - int sequenceid = currentSequenceid; + int currentSequenceId = currentDescriptorFile == null ? 0 : + getTableInfoSequenceId(currentDescriptorFile.getPath()); + int newSequenceId = currentSequenceId; + // Put arbitrary upperbound on how often we retry int retries = 10; - int retrymax = currentSequenceid + retries; - Path tableInfoPath = null; + int retrymax = currentSequenceId + retries; + Path tableDirPath = null; + Path tableInfoDirPath = null; do { - sequenceid += 1; - Path p = getTableInfoFileName(tmpTableDir, sequenceid); - if (fs.exists(p)) { - LOG.debug(p + " exists; retrying up to " + retries + " times"); + newSequenceId += 1; + String filename = getTableInfoFileName(newSequenceId); + Path tempPath = new Path(tmpTableDir, filename); + if (fs.exists(tempPath)) { + LOG.debug(tempPath + " exists; retrying up to " + retries + " times"); continue; } + tableDirPath = new Path(tableDir, filename); + tableInfoDirPath = new Path(tableInfoDir, filename); try { - writeHTD(fs, p, hTableDescriptor); - tableInfoPath = getTableInfoFileName(tableDir, sequenceid); - if (!fs.rename(p, tableInfoPath)) { - throw new IOException("Failed rename of " + p + " to " + tableInfoPath); - } + // First try to create a file in the canonical table dir location (fail if such a file already exists) + writeThenRenameIntoPlace(hTableDescriptor, tempPath, tableDirPath, false); + // If that succeeded then create the same file in the tableInfo subdir. (remove if such a file already exists) + writeThenRenameIntoPlace(hTableDescriptor, tempPath, tableInfoDirPath, true); } catch (IOException ioe) { // Presume clash of names or something; go around again. LOG.debug("Failed write and/or rename; retrying", ioe); - if (!FSUtils.deleteDirectory(fs, p)) { - LOG.warn("Failed cleanup of " + p); + if (!FSUtils.deleteDirectory(fs, tempPath)) { + LOG.warn("Failed cleanup of " + tempPath); } - tableInfoPath = null; + tableDirPath = null; + tableInfoDirPath = null; continue; } - // Cleanup old schema file. - if (status != null) { - if (!FSUtils.deleteDirectory(fs, status.getPath())) { - LOG.warn("Failed delete of " + status.getPath() + "; continuing"); - } - } break; - } while (sequenceid < retrymax); - return tableInfoPath; + } while (newSequenceId < retrymax); + if (tableDirPath != null) { + // if we succeeded, remove old table info files. + deleteTableDescriptorFiles(tableDir, newSequenceId - 1); + deleteTableDescriptorFiles(tableInfoDir, newSequenceId - 1); + } + return tableDirPath; } - private static void writeHTD(final FileSystem fs, final Path p, - final HTableDescriptor htd) + /** + * Writes a table descriptor into the given temp file, then renames it atomically + * to the destination path. If forceOverwrite is set then it removes any existing + * file at the destination first. 
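The retry loop in doWriteTableDescriptor above compresses to the following: claim the next free sequence id by writing into .tmp and renaming into place, retrying on collision up to a fixed bound. A sketch with an illustrative byte[] payload standing in for a serialized descriptor:

```java
import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class WriteRetrySketch {
  static Path writeNext(FileSystem fs, Path tmpDir, Path destDir,
      int currentSequenceId, byte[] payload) throws IOException {
    int retries = 10; // arbitrary upper bound, as in the patch
    for (int seqId = currentSequenceId + 1;
         seqId <= currentSequenceId + retries; seqId++) {
      String name = ".tableinfo." + String.format("%010d", seqId);
      Path tmp = new Path(tmpDir, name);
      Path dest = new Path(destDir, name);
      try {
        FSDataOutputStream out = fs.create(tmp, false); // fail if tmp name taken
        try {
          out.write(payload);
        } finally {
          out.close();
        }
        if (fs.rename(tmp, dest)) {
          return dest; // HDFS rename is atomic; this writer owns seqId now
        }
      } catch (IOException ioe) {
        // presumed clash with a concurrent writer; fall through and retry
      }
      fs.delete(tmp, false); // best-effort cleanup before the next attempt
    }
    return null; // exhausted retries
  }
}
```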
+ */ + private void writeThenRenameIntoPlace(final HTableDescriptor hTableDescriptor, + final Path tempPath, final Path destPath, final boolean forceOverwrite) + throws IOException { + if (forceOverwrite) { + removeExistingFileIfPresent(tempPath); + } + writeHTD(tempPath, hTableDescriptor); + fs.mkdirs(destPath.getParent()); + if (forceOverwrite) { + removeExistingFileIfPresent(destPath); + } + if (!fs.rename(tempPath, destPath)) { + throw new IOException("Failed rename of " + tempPath + " to " + destPath); + } + LOG.debug("Wrote descriptor into: " + destPath); + } + + private void removeExistingFileIfPresent(final Path path) throws IOException { + if (fs.exists(path)) { + LOG.debug("Removing existing file at: " + path); + if (!fs.delete(path, false)) { + throw new IOException("Failed delete of existing file at: " + path); + } + } + } + + private void writeHTD(final Path p, final HTableDescriptor htd) throws IOException { FSDataOutputStream out = fs.create(p, false); try { @@ -542,45 +700,66 @@ public class FSTableDescriptors implements TableDescriptors { } /** - * Create new HTableDescriptor in HDFS. Happens when we are creating table. - * - * @param htableDescriptor - * @param conf + * Attempts to acquire a lock on the given table descriptor directory by creating a file with + * a well known name. Sleeps for some period of time until it succeeds. + * Returns the path of the created lock file if successful, otherwise it throws an IOException + * if it slept too long and could not acquire the lock. */ - public static boolean createTableDescriptor(final HTableDescriptor htableDescriptor, - Configuration conf) + private Path acquireLockFile(final Path dir) throws IOException { - return createTableDescriptor(htableDescriptor, conf, false); + int maxLockAttempts = conf.getInt("hbase.table.descriptor.lock.attempts", 10); + long sleepMillis = conf.getLong("hbase.table.descriptor.lock.sleep.millis", 1000L); + Path lockFile = new Path(dir, LOCKFILE); + int attempt = 0; + boolean success = false; + do { + attempt++; + try { + FSDataOutputStream out = fs.create(lockFile, false); + out.close(); + LOG.debug("Acquired lock: " + lockFile); + success = true; + break; + } catch (IOException ioe) { + LOG.debug("Unable to lock directory: " + dir + " Sleeping " + sleepMillis + "ms..."); + try { + Thread.sleep(sleepMillis); + } catch (InterruptedException e) { + // continue + } + } + } while (attempt < maxLockAttempts); + if (!success) { + throw new IOException("Failed to acquire lock at: " + lockFile + " - Unable to write table descriptor." + + LOCK_WARNING); + } + return lockFile; } - - /** - * Create new HTableDescriptor in HDFS. Happens when we are creating table. If - * forceCreation is true then even if previous table descriptor is present it - * will be overwritten - * - * @param htableDescriptor - * @param conf - * @param forceCreation True if we are to overwrite existing file. 
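acquireLockFile above introduces two tunables; the names and defaults below are taken verbatim from its code. Shown as a programmatic override, for reference:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class LockTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.table.descriptor.lock.attempts", 10);         // default 10
    conf.setLong("hbase.table.descriptor.lock.sleep.millis", 1000L); // default 1s
    // Worst case wait before the IOException: attempts * sleep, ~10 seconds.
  }
}
```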
- */ - static boolean createTableDescriptor(final HTableDescriptor htableDescriptor, - final Configuration conf, boolean forceCreation) - throws IOException { - FileSystem fs = FSUtils.getCurrentFileSystem(conf); - return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor, - forceCreation); + + private void releaseLockFile(final Path lockFile) { + try { + boolean deleted = fs.delete(lockFile, false); + if (!deleted) { + LOG.error("Failed to delete lock file: " + lockFile); + } else { + LOG.debug("Released lock: " + lockFile); + } + } catch (IOException ioe) { + LOG.error("Error deleting lock file", ioe); + } } + + private static final String LOCK_WARNING = " If you see this message often then a process updating the table descriptor may have" + + " crashed. If you are sure no such process is running, remove all files from this" + + " directory. Remove " + LOCKFILE + " last."; /** * Create new HTableDescriptor in HDFS. Happens when we are creating table. * Used by tests. - * @param fs - * @param htableDescriptor - * @param rootdir + * @return True if we successfully created file. */ - public static boolean createTableDescriptor(FileSystem fs, Path rootdir, - HTableDescriptor htableDescriptor) - throws IOException { - return createTableDescriptor(fs, rootdir, htableDescriptor, false); + public boolean createTableDescriptor(HTableDescriptor htableDescriptor) throws IOException { + return createTableDescriptor(htableDescriptor, false); } /** @@ -588,17 +767,15 @@ public class FSTableDescriptors implements TableDescriptors { * forceCreation is true then even if previous table descriptor is present it * will be overwritten * - * @param fs - * @param htableDescriptor - * @param rootdir - * @param forceCreation * @return True if we successfully created file. */ - public static boolean createTableDescriptor(FileSystem fs, Path rootdir, - HTableDescriptor htableDescriptor, boolean forceCreation) + public boolean createTableDescriptor(HTableDescriptor htableDescriptor, boolean forceCreation) throws IOException { + if (fsreadonly) { + throw new NotImplementedException("Cannot create a table descriptor - in read only mode"); + } FileStatus status = - getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString()); + getTableInfoPath(htableDescriptor.getNameAsString()); if (status != null) { LOG.info("Current tableInfoPath = " + status.getPath()); if (!forceCreation) { @@ -608,8 +785,8 @@ public class FSTableDescriptors implements TableDescriptors { } } } - Path p = writeTableDescriptor(fs, htableDescriptor, - FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()), status); + Path p = writeTableDescriptor(htableDescriptor, + getTableDirectory(htableDescriptor.getNameAsString()), status); return p != null; } } diff --git src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index ec0bd62..ab60322 100644 --- src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -771,6 +771,7 @@ public class HBaseFsck extends Configured implements Tool { } } + FSTableDescriptors fstd = new FSTableDescriptors(getConf()); // serialized table info gathering. 
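The two createTableDescriptor variants above differ only in clobbering behavior, which the test changes later in this patch assert directly. In sketch form, assuming an FSTableDescriptors built as elsewhere in this patch:

```java
import java.io.IOException;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

final class ForceCreationSketch {
  static void example(FSTableDescriptors fstd) throws IOException {
    HTableDescriptor htd = new HTableDescriptor("t1"); // illustrative table name
    boolean first = fstd.createTableDescriptor(htd);        // true: created
    boolean second = fstd.createTableDescriptor(htd);       // false: already exists
    boolean forced = fstd.createTableDescriptor(htd, true); // true: overwritten
    assert first && !second && forced;
  }
}
```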
for (HbckInfo hbi: hbckInfos) { @@ -795,9 +796,7 @@ public class HBaseFsck extends Configured implements Tool { Path hbaseRoot = FSUtils.getRootDir(getConf()); tablesInfo.put(tableName, modTInfo); try { - HTableDescriptor htd = - FSTableDescriptors.getTableDescriptor(hbaseRoot.getFileSystem(getConf()), - hbaseRoot, tableName); + HTableDescriptor htd = fstd.getTableDescriptorFromFs(tableName); modTInfo.htds.add(htd); } catch (IOException ioe) { if (!orphanTableDirs.containsKey(tableName)) { @@ -848,7 +847,7 @@ public class HBaseFsck extends Configured implements Tool { for (String columnfamimly : columns) { htd.addFamily(new HColumnDescriptor(columnfamimly)); } - FSTableDescriptors.createTableDescriptor(htd, getConf(), true); + new FSTableDescriptors(getConf()).createTableDescriptor(htd, true); return true; } @@ -864,13 +863,13 @@ public class HBaseFsck extends Configured implements Tool { public void fixOrphanTables() throws IOException { if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) { - Path hbaseRoot = FSUtils.getRootDir(getConf()); List tmpList = new ArrayList(); tmpList.addAll(orphanTableDirs.keySet()); HTableDescriptor[] htds = getHTableDescriptors(tmpList); Iterator>> iter = orphanTableDirs.entrySet().iterator(); int j = 0; int numFailedCase = 0; + FSTableDescriptors fstd = new FSTableDescriptors(getConf()); while (iter.hasNext()) { Entry> entry = (Entry>) iter.next(); String tableName = entry.getKey(); @@ -879,8 +878,7 @@ public class HBaseFsck extends Configured implements Tool { if (tableName.equals(Bytes.toString(htds[j].getName()))) { HTableDescriptor htd = htds[j]; LOG.info("fixing orphan table: " + tableName + " from cache"); - FSTableDescriptors.createTableDescriptor( - hbaseRoot.getFileSystem(getConf()), hbaseRoot, htd, true); + fstd.createTableDescriptor(htd, true); j++; iter.remove(); } diff --git src/main/java/org/apache/hadoop/hbase/util/HMerge.java src/main/java/org/apache/hadoop/hbase/util/HMerge.java index a685aa7..da6663f 100644 --- src/main/java/org/apache/hadoop/hbase/util/HMerge.java +++ src/main/java/org/apache/hadoop/hbase/util/HMerge.java @@ -148,11 +148,10 @@ class HMerge { this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE); - this.tabledir = new Path( - fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))), - Bytes.toString(tableName) - ); - this.htd = FSTableDescriptors.getTableDescriptor(this.fs, this.tabledir); + Path hbaseRootDir = fs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR))); + this.tabledir = new Path(hbaseRootDir, Bytes.toString(tableName)); + this.htd = new FSTableDescriptors(this.conf, this.fs, hbaseRootDir) + .getTableDescriptorFromFs(this.tabledir); Path logdir = new Path(tabledir, "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME); Path oldLogDir = new Path(tabledir, HConstants.HREGION_OLDLOGDIR_NAME); diff --git src/main/java/org/apache/hadoop/hbase/util/Merge.java src/main/java/org/apache/hadoop/hbase/util/Merge.java index 1ed4225..d53c660 100644 --- src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -237,8 +237,8 @@ public class Merge extends Configured implements Tool { if (info2 == null) { throw new NullPointerException("info2 is null using key " + meta2); } - HTableDescriptor htd = FSTableDescriptors.getTableDescriptor(FileSystem.get(getConf()), - this.rootdir, this.tableName); + HTableDescriptor htd = new FSTableDescriptors(getConf(), FileSystem.get(getConf()), this.rootdir) + 
.getTableDescriptorFromFs(Bytes.toString(this.tableName)); HRegion merged = merge(htd, info1, metaRegion1, info2, metaRegion2); // Now find the meta region which will contain the newly merged region diff --git src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java index 4679cf2..4243a1d 100644 --- src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java +++ src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java @@ -69,9 +69,7 @@ public class TestDrainingServer { TEST_UTIL.createMultiRegionsInMeta(TEST_UTIL.getConfiguration(), htd, HBaseTestingUtility.KEYS); // Make a mark for the table in the filesystem. - FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration()); - FSTableDescriptors. - createTableDescriptor(fs, FSUtils.getRootDir(TEST_UTIL.getConfiguration()), htd); + new FSTableDescriptors(TEST_UTIL.getConfiguration()).createTableDescriptor(htd); // Assign out the regions we just created. HBaseAdmin admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); MiniHBaseCluster cluster = TEST_UTIL.getMiniHBaseCluster(); diff --git src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java index 6bc7c32..0616419 100644 --- src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java +++ src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java @@ -40,10 +40,11 @@ public class TestFSTableDescriptorForceCreation { final String name = "newTable2"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); Path rootdir = new Path(UTIL.getDataTestDir(), name); + FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(name); assertTrue("Should create new table descriptor", - FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false)); + fstd.createTableDescriptor(htd, false)); } @Test @@ -53,11 +54,11 @@ public class TestFSTableDescriptorForceCreation { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); // Cleanup old tests if any detrius laying around. 
Path rootdir = new Path(UTIL.getDataTestDir(), name); - TableDescriptors htds = new FSTableDescriptors(fs, rootdir); + FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(name); - htds.add(htd); + fstd.add(htd); assertFalse("Should not create new table descriptor", - FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false)); + fstd.createTableDescriptor(htd, false)); } @Test @@ -66,10 +67,11 @@ public class TestFSTableDescriptorForceCreation { final String name = "createNewTableNew2"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); Path rootdir = new Path(UTIL.getDataTestDir(), name); + FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir); HTableDescriptor htd = new HTableDescriptor(name); - FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false); + fstd.createTableDescriptor(htd, false); assertTrue("Should create new table descriptor", - FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, true)); + fstd.createTableDescriptor(htd, true)); } @org.junit.Rule diff --git src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index 479d904..f30de5f 100644 --- src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -360,8 +360,9 @@ public class TestMasterFailover { FileSystem filesystem = FileSystem.get(conf); Path rootdir = filesystem.makeQualified( new Path(conf.get(HConstants.HBASE_DIR))); + FSTableDescriptors fstd = new FSTableDescriptors(conf, filesystem, rootdir); // Write the .tableinfo - FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled); + fstd.createTableDescriptor(htdEnabled); HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null); createRegion(hriEnabled, rootdir, conf, htdEnabled); @@ -373,7 +374,7 @@ public class TestMasterFailover { HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); // Write the .tableinfo - FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled); + fstd.createTableDescriptor(htdDisabled); HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null); createRegion(hriDisabled, rootdir, conf, htdDisabled); List disabledRegions = TEST_UTIL.createMultiRegionsInMeta( @@ -673,8 +674,9 @@ public class TestMasterFailover { FileSystem filesystem = FileSystem.get(conf); Path rootdir = filesystem.makeQualified( new Path(conf.get(HConstants.HBASE_DIR))); + FSTableDescriptors fstd = new FSTableDescriptors(conf, filesystem, rootdir); // Write the .tableinfo - FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled); + fstd.createTableDescriptor(htdEnabled); HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null); createRegion(hriEnabled, rootdir, conf, htdEnabled); @@ -686,7 +688,7 @@ public class TestMasterFailover { HTableDescriptor htdDisabled = new HTableDescriptor(disabledTable); htdDisabled.addFamily(new HColumnDescriptor(FAMILY)); // Write the .tableinfo - FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdDisabled); + fstd.createTableDescriptor(htdDisabled); HRegionInfo hriDisabled = new HRegionInfo(htdDisabled.getName(), null, null); createRegion(hriDisabled, rootdir, conf, htdDisabled); diff --git 
src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 6dfba41..60a8983 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -64,26 +64,26 @@ public class TestHRegionInfo { @Test public void testGetSetOfHTD() throws IOException { HBaseTestingUtility HTU = new HBaseTestingUtility(); - final String tablename = "testGetSetOfHTD"; + final String tablename = "testGetSetOfHTD"; + FSTableDescriptors fstd = new FSTableDescriptors(HTU.getConfiguration()); // Delete the temporary table directory that might still be there from the // previous test run. - FSTableDescriptors.deleteTableDescriptorIfExists(tablename, - HTU.getConfiguration()); + fstd.deleteTableDescriptorIfExists(tablename); HTableDescriptor htd = new HTableDescriptor(tablename); - FSTableDescriptors.createTableDescriptor(htd, HTU.getConfiguration()); - HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"), + fstd.createTableDescriptor(htd); + HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tablename), HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); HTableDescriptor htd2 = hri.getTableDesc(); - assertTrue(htd.equals(htd2)); + assertEquals(htd, htd2); final String key = "SOME_KEY"; assertNull(htd.getValue(key)); final String value = "VALUE"; htd.setValue(key, value); hri.setTableDesc(htd); HTableDescriptor htd3 = hri.getTableDesc(); - assertTrue(htd.equals(htd3)); + assertEquals(htd, htd3); } @Test diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index c5dda2f..f9e99c3 100644 --- src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -17,18 +17,30 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Arrays; +import java.util.Comparator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableExistsException; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -44,12 +56,12 @@ public class TestFSTableDescriptors { @Test (expected=IllegalArgumentException.class) public void testRegexAgainstOldStyleTableInfo() { - Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME); - int i = FSTableDescriptors.getTableInfoSequenceid(p); + Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX); + int i = FSTableDescriptors.getTableInfoSequenceId(p); assertEquals(0, i); // Assert it won't eat garbage -- that it fails p = new 
Path("/tmp", "abc"); - FSTableDescriptors.getTableInfoSequenceid(p); + FSTableDescriptors.getTableInfoSequenceId(p); } @Test @@ -57,12 +69,13 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testCreate"); HTableDescriptor htd = new HTableDescriptor("testCreate"); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); - assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); + FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); + assertTrue(fstd.createTableDescriptor(htd)); + assertFalse(fstd.createTableDescriptor(htd)); FileStatus [] statuses = fs.listStatus(testdir); assertTrue("statuses.length="+statuses.length, statuses.length == 1); for (int i = 0; i < 10; i++) { - FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + fstd.updateTableDescriptor(htd); } statuses = fs.listStatus(testdir); assertTrue(statuses.length == 1); @@ -72,53 +85,53 @@ public class TestFSTableDescriptors { } @Test - public void testSequenceidAdvancesOnTableInfo() throws IOException { + public void testSequenceIdAdvancesOnTableInfo() throws IOException { Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo"); HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo"); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); - int i0 = FSTableDescriptors.getTableInfoSequenceid(p0); - Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir); + Path p0 = fstd.updateTableDescriptor(htd); + int i0 = FSTableDescriptors.getTableInfoSequenceId(p0); + Path p1 = fstd.updateTableDescriptor(htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p0)); - int i1 = FSTableDescriptors.getTableInfoSequenceid(p1); + int i1 = FSTableDescriptors.getTableInfoSequenceId(p1); assertTrue(i1 == i0 + 1); - Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + Path p2 = fstd.updateTableDescriptor(htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p1)); - int i2 = FSTableDescriptors.getTableInfoSequenceid(p2); + int i2 = FSTableDescriptors.getTableInfoSequenceId(p2); assertTrue(i2 == i1 + 1); } @Test public void testFormatTableInfoSequenceId() { - Path p0 = assertWriteAndReadSequenceid(0); + Path p0 = assertWriteAndReadSequenceId(0); // Assert p0 has format we expect. StringBuilder sb = new StringBuilder(); for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) { sb.append("0"); } - assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(), + assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(), p0.getName()); // Check a few more. - Path p2 = assertWriteAndReadSequenceid(2); - Path p10000 = assertWriteAndReadSequenceid(10000); + Path p2 = assertWriteAndReadSequenceId(2); + Path p10000 = assertWriteAndReadSequenceId(10000); // Get a .tablinfo that has no sequenceid suffix. 
   @Test
   public void testFormatTableInfoSequenceId() {
-    Path p0 = assertWriteAndReadSequenceid(0);
+    Path p0 = assertWriteAndReadSequenceId(0);
     // Assert p0 has format we expect.
     StringBuilder sb = new StringBuilder();
     for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) {
       sb.append("0");
     }
-    assertEquals(FSTableDescriptors.TABLEINFO_NAME + "." + sb.toString(),
+    assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(),
       p0.getName());
     // Check a few more.
-    Path p2 = assertWriteAndReadSequenceid(2);
-    Path p10000 = assertWriteAndReadSequenceid(10000);
+    Path p2 = assertWriteAndReadSequenceId(2);
+    Path p10000 = assertWriteAndReadSequenceId(10000);
     // Get a .tablinfo that has no sequenceid suffix.
-    Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_NAME);
+    Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_FILE_PREFIX);
     FileStatus fs = new FileStatus(0, false, 0, 0, 0, p);
     FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0);
     FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2);
     FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000);
-    FSTableDescriptors.FileStatusFileNameComparator comparator =
-      new FSTableDescriptors.FileStatusFileNameComparator();
+    Comparator comparator = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
     assertTrue(comparator.compare(fs, fs0) > 0);
     assertTrue(comparator.compare(fs0, fs2) > 0);
     assertTrue(comparator.compare(fs2, fs10000) > 0);
   }

-  private Path assertWriteAndReadSequenceid(final int i) {
-    Path p = FSTableDescriptors.getTableInfoFileName(new Path("/tmp"), i);
-    int ii = FSTableDescriptors.getTableInfoSequenceid(p);
+  private Path assertWriteAndReadSequenceId(final int i) {
+    Path p = new Path("/tmp", FSTableDescriptors.getTableInfoFileName(i));
+    int ii = FSTableDescriptors.getTableInfoSequenceId(p);
     assertEquals(i, ii);
     return p;
   }
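The comparator assertions in testFormatTableInfoSequenceId above encode a "newest first" ordering: names compare in descending order, so after sorting, element 0 is the tableinfo file with the highest sequence id and the suffix-less old-style name lands last. An illustrative ordering with that property, not the actual TABLEINFO_FILESTATUS_COMPARATOR; the fixed-width zero padding is what makes plain string comparison sufficient:

    import java.util.Arrays;
    import java.util.Comparator;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;

    public class NewestTableInfoFirstSketch {
      // Descending file-name order; illustrative only.
      static final Comparator<FileStatus> NEWEST_FIRST = new Comparator<FileStatus>() {
        @Override
        public int compare(FileStatus a, FileStatus b) {
          return b.getPath().getName().compareTo(a.getPath().getName());
        }
      };

      public static void main(String[] args) {
        FileStatus bare = new FileStatus(0, false, 0, 0, -1, new Path("/tmp/.tableinfo"));
        FileStatus p2 = new FileStatus(0, false, 0, 0, -1, new Path("/tmp/.tableinfo.0000000002"));
        FileStatus p10000 = new FileStatus(0, false, 0, 0, -1, new Path("/tmp/.tableinfo.0000010000"));
        FileStatus[] all = {bare, p2, p10000};
        Arrays.sort(all, NEWEST_FIRST);       // resulting order: p10000, p2, bare
        for (FileStatus s : all) {
          System.out.println(s.getPath().getName());
        }
      }
    }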
@@ -129,7 +142,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     htds.add(htd);
     assertNotNull(htds.remove(htd.getNameAsString()));
@@ -141,31 +154,26 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(name);
     Path rootdir = UTIL.getDataTestDir(name);
-    createHTDInFS(fs, rootdir, htd);
-    HTableDescriptor htd2 =
-      FSTableDescriptors.getTableDescriptor(fs, rootdir, htd.getNameAsString());
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
+    fstd.createTableDescriptor(htd);
+    HTableDescriptor htd2 = fstd.getTableDescriptorFromFs(htd.getNameAsString());
     assertTrue(htd.equals(htd2));
   }

-  private void createHTDInFS(final FileSystem fs, Path rootdir,
-      final HTableDescriptor htd)
-  throws IOException {
-    FSTableDescriptors.createTableDescriptor(fs, rootdir, htd);
-  }
-
   @Test public void testHTableDescriptors()
   throws IOException, InterruptedException {
     final String name = "testHTableDescriptors";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any debris laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     final int count = 10;
     // Write out table infos.
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(name + i);
-      createHTDInFS(fs, rootdir, htd);
+      fstd.createTableDescriptor(htd);
     }

-    FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
+    FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) {
       @Override
       public HTableDescriptor get(byte[] tablename)
       throws TableExistsException, FileNotFoundException, IOException {
@@ -183,7 +191,7 @@ public class TestFSTableDescriptors {
     for (int i = 0; i < count; i++) {
       HTableDescriptor htd = new HTableDescriptor(name + i);
       htd.addFamily(new HColumnDescriptor("" + i));
-      FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd);
+      fstd.updateTableDescriptor(htd);
     }
     // Wait a while so mod time we write is for sure different.
     Thread.sleep(100);
@@ -208,7 +216,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     assertNull("There shouldn't be any HTD for this table",
       htds.get("NoSuchTable"));
   }
@@ -219,7 +227,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     htds.add(htd);
     htds.add(htd);
@@ -229,7 +237,7 @@ public class TestFSTableDescriptors {
   @Test
   public void testTableInfoFileStatusComparator() {
     FileStatus bare =
-      new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_NAME));
+      new FileStatus(0, false, 0, 0, -1, new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX));
     FileStatus future = new FileStatus(0, false, 0, 0, -1,
       new Path("/tmp/tablinfo."
        + System.currentTimeMillis()));
@@ -239,8 +247,7 @@ public class TestFSTableDescriptors {
     FileStatus [] alist = {bare, future, farFuture};
     FileStatus [] blist = {bare, farFuture, future};
     FileStatus [] clist = {farFuture, bare, future};
-    FSTableDescriptors.FileStatusFileNameComparator c =
-      new FSTableDescriptors.FileStatusFileNameComparator();
+    Comparator c = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR;
     Arrays.sort(alist, c);
     Arrays.sort(blist, c);
     Arrays.sort(clist, c);
@@ -256,7 +263,7 @@ public class TestFSTableDescriptors {
   public void testReadingArchiveDirectoryFromFS() throws IOException {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     try {
-      new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
+      new FSTableDescriptors(UTIL.getConfiguration(), fs, FSUtils.getRootDir(UTIL.getConfiguration()))
           .get(HConstants.HFILE_ARCHIVE_DIRECTORY);
       fail("Shouldn't be able to read a table descriptor for the archive directory.");
     } catch (IOException e) {
diff --git src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 3771d9a..a5cb08e 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -421,7 +421,8 @@ public class TestHBaseFsck {
     Path hbaseTableDir = new Path(conf.get(HConstants.HBASE_DIR) + "/" + table );
     fs = hbaseTableDir.getFileSystem(conf);
-    FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
+    FSTableDescriptors fstd = new FSTableDescriptors(conf);
+    FileStatus status = fstd.getTableInfoPath(hbaseTableDir);
     tableinfo = status.getPath();
     fs.rename(tableinfo, new Path("/.tableinfo"));
@@ -433,7 +434,7 @@ public class TestHBaseFsck {
     hbck = doFsck(conf, true);
     assertNoErrors(hbck);
     status = null;
-    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
+    status = fstd.getTableInfoPath(hbaseTableDir);
     assertNotNull(status);

     HTableDescriptor htd = admin.getTableDescriptor(table.getBytes());
@@ -448,7 +449,7 @@ public class TestHBaseFsck {
     hbck = doFsck(conf, true);
     assertNoErrors(hbck);
     status = null;
-    status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
+    status = fstd.getTableInfoPath(hbaseTableDir);
     assertNotNull(status);
     htd = admin.getTableDescriptor(table.getBytes());
     assertEquals(htd.getValue("NOT_DEFAULT"), "true");
@@ -1077,9 +1078,7 @@ public class TestHBaseFsck {
     // Create a ZKW to use in the test
     ZooKeeperWatcher zkw = HBaseTestingUtility.getZooKeeperWatcher(TEST_UTIL);

-    FileSystem filesystem = FileSystem.get(conf);
-    Path rootdir = filesystem.makeQualified(new Path(conf
-        .get(HConstants.HBASE_DIR)));
+    FSTableDescriptors fstd = new FSTableDescriptors(conf);

     byte[][] SPLIT_KEYS = new byte[][] { new byte[0], Bytes.toBytes("aaa"),
         Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd") };
@@ -1087,8 +1086,7 @@ public class TestHBaseFsck {
     htdDisabled.addFamily(new HColumnDescriptor(FAM));

     // Write the .tableinfo
-    FSTableDescriptors
-        .createTableDescriptor(filesystem, rootdir, htdDisabled);
+    fstd.createTableDescriptor(htdDisabled);
     List disabledRegions = TEST_UTIL.createMultiRegionsInMeta(
         TEST_UTIL.getConfiguration(), htdDisabled, SPLIT_KEYS);
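TestHBaseFsck above uses a third construction form, new FSTableDescriptors(conf), with no explicit filesystem or root dir. The apparent intent, inferred from the hunks rather than stated in the patch, is that the one-arg constructor derives both from hbase.rootdir in the Configuration. A sketch of the two forms side by side:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class ConstructorFormsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Explicit form: caller supplies the filesystem and root directory.
        FileSystem fs = FileSystem.get(conf);
        Path rootdir = new Path(conf.get(HConstants.HBASE_DIR));
        FSTableDescriptors explicit = new FSTableDescriptors(conf, fs, rootdir);
        // Configuration-only form, as used in TestHBaseFsck above; assumed to
        // resolve the fs and rootdir from the same hbase.rootdir setting.
        FSTableDescriptors fromConf = new FSTableDescriptors(conf);
      }
    }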
diff --git src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
index a9948c7..52a2dd3 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
@@ -95,7 +95,7 @@ public class TestMergeTable {

     // Create regions and populate them at same time.  Create the tabledir
     // for them first.
-    FSTableDescriptors.createTableDescriptor(fs, rootdir, desc);
+    new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc);
     HRegion [] regions = {
       createRegion(desc, null, row_70001, 1, 70000, rootdir),
       createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
diff --git src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
index f5f0ac7..830ddee 100644
--- src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -138,7 +138,7 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create root and meta regions
       createRootAndMetaRegions();
-      FSTableDescriptors.createTableDescriptor(this.fs, this.testDir, this.desc);
+      new FSTableDescriptors(conf, this.fs, this.testDir).createTableDescriptor(this.desc);
       /*
        * Create the regions we will merge
        */
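Taken together, the tests exercise the refactored class both through the TableDescriptors interface (add, get, remove) and through its filesystem-level helpers. A closing sketch of that lifecycle using only calls that appear in the hunks above; the table name and root dir are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptors;
    import org.apache.hadoop.hbase.util.FSTableDescriptors;

    public class DescriptorLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootdir = new Path("/tmp/hbase-sketch");         // hypothetical root dir
        TableDescriptors htds = new FSTableDescriptors(conf, fs, rootdir);

        HTableDescriptor htd = new HTableDescriptor("demo");  // hypothetical table
        htds.add(htd);                                // writes the .tableinfo file
        HTableDescriptor readBack = htds.get("demo"); // reads it back (and caches it)
        System.out.println(htd.equals(readBack));     // expected: true
        htds.remove("demo");                          // deletes and returns the descriptor
      }
    }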