From aca23fe8c8430b4c0b71e7b655b854ff00b77edd Mon Sep 17 00:00:00 2001 From: Esteban Gutierrez Date: Thu, 6 Nov 2014 11:39:34 -0800 Subject: [PATCH] HBASE-12219 Cache more efficiently getAll() and get() in FSTableDescriptors --- .../org/apache/hadoop/hbase/TableDescriptors.java | 10 + .../org/apache/hadoop/hbase/master/HMaster.java | 19 +- .../hbase/master/handler/CreateTableHandler.java | 6 +- .../hadoop/hbase/regionserver/HRegionServer.java | 2 +- .../hadoop/hbase/util/FSTableDescriptors.java | 274 ++++++++------------- .../hadoop/hbase/master/TestCatalogJanitor.java | 8 + .../hadoop/hbase/thrift/TestThriftServer.java | 3 +- .../hadoop/hbase/util/TestFSTableDescriptors.java | 22 +- 8 files changed, 154 insertions(+), 190 deletions(-) diff --git a/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java b/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java index 58c211b..e0adeb6 100644 --- a/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java +++ b/src/main/java/org/apache/hadoop/hbase/TableDescriptors.java @@ -65,4 +65,14 @@ public interface TableDescriptors { */ public HTableDescriptor remove(final String tablename) throws IOException; + + /** + * Enables the tabledescriptor cache + */ + void setCacheOn() throws IOException; + + /** + * Disables the tabledescriptor cache + */ + void setCacheOff() throws IOException; } diff --git a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java index 82fd1f6..bc74307 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/HMaster.java +++ b/src/main/java/org/apache/hadoop/hbase/master/HMaster.java @@ -237,6 +237,8 @@ Server { private MasterCoprocessorHost cpHost; private final ServerName serverName; + private final boolean preLoadTableDescriptors; + private TableDescriptors tableDescriptors; // Time stamps for when a hmaster was started and when it became active @@ -349,6 +351,10 @@ Server { this.rpcServer.startThreads(); this.metrics = new MasterMetrics(getServerName().toString()); + // preload table descriptor at startup + this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true); + + // Health checker thread. int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ, HConstants.DEFAULT_THREAD_WAKE_FREQUENCY); @@ -576,6 +582,15 @@ Server { new FSTableDescriptors(this.fileSystemManager.getFileSystem(), this.fileSystemManager.getRootDir()); + // enable table descriptors cache + this.tableDescriptors.setCacheOn(); + + // warm-up HTDs cache on master initialization + if (preLoadTableDescriptors) { + status.setStatus("Pre-loading table descriptors"); + this.tableDescriptors.getAll(); + } + // publish cluster ID status.setStatus("Publishing Cluster ID in ZooKeeper"); ClusterId.setClusterId(this.zooKeeper, fileSystemManager.getClusterId()); @@ -1610,7 +1625,7 @@ Server { } return info.getInfoPort(); } - + /** * @return array of coprocessor SimpleNames. 
*/ @@ -1956,7 +1971,7 @@ Server { throws IOException { List descriptors = new ArrayList(tableNames.size()); - + boolean bypass = false; if (this.cpHost != null) { bypass = this.cpHost.preGetTableDescriptors(tableNames, descriptors); diff --git a/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java b/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java index 92d28c3..b872a01 100644 --- a/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java +++ b/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.catalog.MetaEditor; import org.apache.hadoop.hbase.catalog.MetaReader; import org.apache.hadoop.hbase.executor.EventHandler; import org.apache.hadoop.hbase.master.AssignmentManager; +import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.ServerManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; @@ -185,7 +186,7 @@ public class CreateTableHandler extends EventHandler { throw new IOException("Unable to move table from temp=" + tempTableDir + " to hbase root=" + tableDir); } - + if (regionInfos != null && regionInfos.size() > 0) { // 4. Add regions to META addRegionsToMeta(this.catalogTracker, regionInfos); @@ -211,6 +212,9 @@ public class CreateTableHandler extends EventHandler { throw new IOException("Unable to ensure that " + tableName + " will be" + " enabled because of a ZooKeeper issue", e); } + + // 7. Update the tabledescriptor cache. + ((HMaster) this.server).getTableDescriptors().get(tableName); } /** diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index eadc9e8..4c83112 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1118,7 +1118,7 @@ public class HRegionServer implements HRegionInterface, HBaseRPCErrorHandler, // Get fs instance used by this RS this.fs = new HFileSystem(this.conf, this.useHBaseChecksum); this.rootDir = new Path(this.conf.get(HConstants.HBASE_DIR)); - this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true); + this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true, false); this.hlog = setupWALAndReplication(); // Init in here rather than in constructor after thread name has been set this.metrics = new RegionServerMetrics(); diff --git a/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index eadd30c..ff3723a 100644 --- a/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -49,7 +49,7 @@ import org.apache.hadoop.hbase.TableInfoMissingException; * passed filesystem. It expects descriptors to be in a file under the * table's directory in FS. Can be read-only -- i.e. does not modify * the filesystem or can be read and write. - * + * *
Also has utility for keeping up the table descriptors tableinfo file. * The table schema file is kept under the table directory in the filesystem. * It has a {@link #TABLEINFO_NAME} prefix and then a suffix that is the @@ -66,6 +66,9 @@ public class FSTableDescriptors implements TableDescriptors { private final FileSystem fs; private final Path rootdir; private final boolean fsreadonly; + private volatile boolean usecache; + private volatile boolean fsvisited; + long cachehits = 0; long invocations = 0; @@ -75,39 +78,11 @@ public class FSTableDescriptors implements TableDescriptors { // This cache does not age out the old stuff. Thinking is that the amount // of data we keep up in here is so small, no need to do occasional purge. // TODO. - private final Map cache = - new ConcurrentHashMap(); - - /** - * Data structure to cache a table descriptor, the time it was modified, - * and the time the table directory was modified. - */ - static class TableDescriptorModtime { - private final HTableDescriptor descriptor; - private final long modtime; - private final long dirmodtime; - - TableDescriptorModtime(final long modtime, final long dirmodtime, final HTableDescriptor htd) { - this.descriptor = htd; - this.modtime = modtime; - this.dirmodtime = dirmodtime; - } - - long getModtime() { - return this.modtime; - } - - long getDirModtime() { - return this.dirmodtime; - } - - HTableDescriptor getTableDescriptor() { - return this.descriptor; - } - } + private final Map cache = + new ConcurrentHashMap(); public FSTableDescriptors(final FileSystem fs, final Path rootdir) { - this(fs, rootdir, false); + this(fs, rootdir, false, true); } /** @@ -117,11 +92,26 @@ public class FSTableDescriptors implements TableDescriptors { * operations; i.e. on remove, we do not do delete in fs. */ public FSTableDescriptors(final FileSystem fs, final Path rootdir, - final boolean fsreadOnly) { + final boolean fsreadOnly, final boolean usecache) { super(); this.fs = fs; this.rootdir = rootdir; this.fsreadonly = fsreadOnly; + this.usecache = usecache; + } + + public void setCacheOn() throws IOException { + this.cache.clear(); + this.usecache = true; + } + + public void setCacheOff() throws IOException { + this.usecache = false; + this.cache.clear(); + } + + public boolean isUsecache() { + return this.usecache; } /* (non-Javadoc) @@ -154,46 +144,31 @@ public class FSTableDescriptors implements TableDescriptors { throw new IOException("No descriptor found for table = " + tablename); } - // Look in cache of descriptors. - TableDescriptorModtime cachedtdm = this.cache.get(tablename); - - if (cachedtdm != null) { - // Check mod time has not changed (this is trip to NN). - // First check directory modtime as it doesn't require a scan of the full table directory - long tableDirModtime = getTableDirModtime(fs, this.rootdir, tablename); - boolean cachehit = false; - if (tableDirModtime <= cachedtdm.getDirModtime()) { - // table dir not changed since our cached entry - cachehit = true; - } else if (getTableInfoModtime(this.fs, this.rootdir, tablename) <= cachedtdm.getModtime()) { - // the table dir has changed (perhaps a region split) but the info file itself has not - // so the cached descriptor is good, we just need to update the entry - this.cache.put(tablename, new TableDescriptorModtime(cachedtdm.getModtime(), - tableDirModtime, cachedtdm.getTableDescriptor())); - cachehit = true; - } // else table info file has been changed, need to read it - if (cachehit) { + if (usecache) { + // Look in cache of descriptors. 
+ HTableDescriptor cachedtdm = this.cache.get(tablename); + if (cachedtdm != null) { cachehits++; - return cachedtdm.getTableDescriptor(); + return cachedtdm; } - } - - TableDescriptorModtime tdmt = null; + } + HTableDescriptor tdmt = null; try { - tdmt = getTableDescriptorModtime(this.fs, this.rootdir, tablename, true); + tdmt = getTableDescriptorFromFs(fs, rootdir, tablename); } catch (NullPointerException e) { LOG.debug("Exception during readTableDecriptor. Current table name = " - + tablename, e); + + tablename, e); } catch (IOException ioe) { LOG.debug("Exception during readTableDecriptor. Current table name = " - + tablename, ioe); + + tablename, ioe); } - - if (tdmt != null) { + // last HTD written wins + if (usecache && tdmt != null) { this.cache.put(tablename, tdmt); } - return tdmt == null ? null : tdmt.getTableDescriptor(); - } + + return tdmt; + } /* (non-Javadoc) * @see org.apache.hadoop.hbase.TableDescriptors#getTableDescriptors(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path) @@ -202,18 +177,36 @@ public class FSTableDescriptors implements TableDescriptors { public Map getAll() throws IOException { Map htds = new TreeMap(); - List tableDirs = FSUtils.getTableDirs(fs, rootdir); - for (Path d: tableDirs) { - HTableDescriptor htd = null; - try { - htd = get(d.getName()); - } catch (FileNotFoundException fnfe) { - // inability of retrieving one HTD shouldn't stop getting the remaining - LOG.warn("Trouble retrieving htd", fnfe); + if (fsvisited && usecache) { + for (Map.Entry entry: this.cache.entrySet()) { + htds.put(entry.getKey().toString(), entry.getValue()); + } + // add hbase:meta to the response + // htds.put(HTableDescriptor.META_TABLEDESC.getNameAsString(), + // HTableDescriptor.META_TABLEDESC); + } else { + LOG.debug("Fetching table descriptors from the filesystem."); + boolean allvisited = true; + for (Path d : FSUtils.getTableDirs(fs, rootdir)) { + HTableDescriptor htd = null; + try { + htd = get(d.getName()); + } catch (FileNotFoundException fnfe) { + // inability of retrieving one HTD shouldn't stop getting the remaining + LOG.warn("Trouble retrieving htd", fnfe); + } + if (htd == null) { + allvisited = false; + continue; + } else { + htds.put(htd.getNameAsString(), htd); + } + if (htds.size() == 0) { + allvisited = false; // no user tables + } + fsvisited = allvisited; } - if (htd == null) continue; - htds.put(d.getName(), htd); } return htds; } @@ -230,10 +223,6 @@ public class FSTableDescriptors implements TableDescriptors { throw new NotImplementedException(); } if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd); - String tableName = htd.getNameAsString(); - long modtime = getTableInfoModtime(this.fs, this.rootdir, tableName); - long dirmodtime = getTableDirModtime(this.fs, this.rootdir, tableName); - this.cache.put(tableName, new TableDescriptorModtime(modtime, dirmodtime, htd)); } @Override @@ -247,13 +236,13 @@ public class FSTableDescriptors implements TableDescriptors { } } } - TableDescriptorModtime tdm = this.cache.remove(tablename); - return tdm == null ? null : tdm.getTableDescriptor(); + HTableDescriptor descriptor = this.cache.remove(tablename); + return descriptor; } /** * Checks if .tableinfo exists for given table - * + * * @param fs file system * @param rootdir root directory of HBase installation * @param tableName name of table @@ -372,29 +361,6 @@ public class FSTableDescriptors implements TableDescriptors { TABLEINFO_NAME + "." 
+ formatTableInfoSequenceId(sequenceid)); } - static long getTableDirModtime(final FileSystem fs, final Path rootdir, - final String tableName) - throws IOException { - Path tabledir = FSUtils.getTablePath(rootdir, tableName); - FileStatus status = fs.getFileStatus(tabledir); - return status == null? 0: status.getModificationTime(); - } - - /** - * @param fs - * @param rootdir - * @param tableName - * @return Modification time for the table {@link #TABLEINFO_NAME} file - * or 0 if no tableinfo file found. - * @throws IOException - */ - static long getTableInfoModtime(final FileSystem fs, final Path rootdir, - final String tableName) - throws IOException { - FileStatus status = getTableInfoPath(fs, rootdir, tableName); - return status == null? 0: status.getModificationTime(); - } - /** * Returns the latest table descriptor for the given table directly from the file system * if it exists, bypassing the local cache. @@ -433,82 +399,37 @@ public class FSTableDescriptors implements TableDescriptors { return hTableDescriptor; } - /** - * Get HTD from HDFS. - * @param fs - * @param hbaseRootDir - * @param tableName - * @return Descriptor or null if none found. - * @throws IOException + /** + * Returns the latest table descriptor for the table located at the given directory + * directly from the file system if it exists. + * @throws TableInfoMissingException if there is no descriptor */ - public static HTableDescriptor getTableDescriptor(FileSystem fs, - Path hbaseRootDir, byte[] tableName) + public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir) throws IOException { - HTableDescriptor htd = null; - try { - TableDescriptorModtime tdmt = - getTableDescriptorModtime(fs, hbaseRootDir, Bytes.toString(tableName), false); - htd = tdmt == null ? null : tdmt.getTableDescriptor(); - } catch (NullPointerException e) { - LOG.debug("Exception during readTableDecriptor. Current table name = " - + Bytes.toString(tableName), e); - } - return htd; - } - - static HTableDescriptor getTableDescriptor(FileSystem fs, - Path hbaseRootDir, String tableName) throws NullPointerException, IOException { - TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, hbaseRootDir, tableName, false); - return tdmt == null ? null : tdmt.getTableDescriptor(); + return getTableDescriptorFromFs(fs, tableDir); } - static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, - Path hbaseRootDir, String tableName, boolean readDirModtime) - throws NullPointerException, IOException{ - // ignore both -ROOT- and .META. tables - if (Bytes.compareTo(Bytes.toBytes(tableName), HConstants.ROOT_TABLE_NAME) == 0 - || Bytes.compareTo(Bytes.toBytes(tableName), HConstants.META_TABLE_NAME) == 0) { - return null; - } - return getTableDescriptorModtime(fs, FSUtils.getTablePath(hbaseRootDir, tableName), readDirModtime); + /** + * Returns the latest table descriptor for the table located at the given directory + * directly from the file system if it exists. 
+ * @throws TableInfoMissingException if there is no descriptor + */ + public static HTableDescriptor getTableDescriptor(FileSystem fs, Path hbaseRootDir, byte[] tableDir) + throws IOException { + return getTableDescriptorFromFs(fs, FSUtils.getTablePath(hbaseRootDir, tableDir)); } /** - * @param fs filesystem - * @param tableDir path to table directory - * @param readDirModtime true if dirmodtime should be read also - * @return TableDescriptorModtime or null if no table descriptor was found - * at the specified path - * @throws IOException + * Returns the latest table descriptor for the table located at the given directory + * directly from the file system if it exists. + * @throws TableInfoMissingException if there is no descriptor */ - static TableDescriptorModtime getTableDescriptorModtime(FileSystem fs, Path tableDir, boolean readDirModtime) - throws NullPointerException, IOException { - if (tableDir == null) throw new NullPointerException(); - FileStatus status = getTableInfoPath(fs, tableDir); - if (status == null) { - return null; - } - FSDataInputStream fsDataInputStream = fs.open(status.getPath()); - HTableDescriptor hTableDescriptor = null; - try { - hTableDescriptor = new HTableDescriptor(); - hTableDescriptor.readFields(fsDataInputStream); - } finally { - fsDataInputStream.close(); - } - long dirModtime = 0; - if (readDirModtime) { - dirModtime = fs.getFileStatus(tableDir).getModificationTime(); - } - return new TableDescriptorModtime(status.getModificationTime(), dirModtime, hTableDescriptor); - } - - public static HTableDescriptor getTableDescriptor(FileSystem fs, Path tableDir) - throws IOException, NullPointerException { - TableDescriptorModtime tdmt = getTableDescriptorModtime(fs, tableDir, false); - return tdmt == null? null: tdmt.getTableDescriptor(); + public static HTableDescriptor getTableDescriptor(FileSystem fs, Path hbaseRootDir, String tableDir) + throws IOException { + return getTableDescriptorFromFs(fs, FSUtils.getTablePath(hbaseRootDir, tableDir)); } - + + /** * Update table descriptor @@ -518,7 +439,7 @@ public class FSTableDescriptors implements TableDescriptors { * @return New tableinfo or null if we failed update. * @throws IOException Thrown if failed update. */ - static Path updateHTableDescriptor(FileSystem fs, Path rootdir, + Path updateHTableDescriptor(FileSystem fs, Path rootdir, HTableDescriptor hTableDescriptor) throws IOException { Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName()); @@ -526,6 +447,9 @@ public class FSTableDescriptors implements TableDescriptors { getTableInfoPath(fs, tableDir)); if (p == null) throw new IOException("Failed update"); LOG.info("Updated tableinfo=" + p); + if (usecache) { + this.cache.put(hTableDescriptor.getNameAsString(), hTableDescriptor); + } return p; } @@ -549,7 +473,7 @@ public class FSTableDescriptors implements TableDescriptors { * @param tableDir * @param status * @return Descriptor file or null if we failed write. - * @throws IOException + * @throws IOException */ private static Path writeTableDescriptor(final FileSystem fs, final HTableDescriptor hTableDescriptor, final Path tableDir, @@ -619,7 +543,7 @@ public class FSTableDescriptors implements TableDescriptors { /** * Create new HTableDescriptor in HDFS. Happens when we are creating table. - * + * * @param htableDescriptor * @param conf */ @@ -633,7 +557,7 @@ public class FSTableDescriptors implements TableDescriptors { * Create new HTableDescriptor in HDFS. Happens when we are creating table. 
If * forceCreation is true then even if previous table descriptor is present it * will be overwritten - * + * * @param htableDescriptor * @param conf * @param forceCreation True if we are to overwrite existing file. @@ -663,7 +587,7 @@ public class FSTableDescriptors implements TableDescriptors { * Create new HTableDescriptor in HDFS. Happens when we are creating table. If * forceCreation is true then even if previous table descriptor is present it * will be overwritten - * + * * @param fs * @param htableDescriptor * @param rootdir diff --git a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index 13a3c27..06cd45a 100644 --- a/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ b/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -263,6 +263,14 @@ public class TestCatalogJanitor { @Override public void add(HTableDescriptor htd) throws IOException { } + + @Override + public void setCacheOn() throws IOException { + } + + @Override + public void setCacheOff() throws IOException { + } }; } diff --git a/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java index 0cd4620..6b88710 100644 --- a/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java +++ b/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java @@ -186,7 +186,8 @@ public class TestThriftServer { assertEquals(handler.getTableNames().size(), 1); assertEquals(handler.getColumnDescriptors(tableAname).size(), 2); assertTrue(handler.isTableEnabled(tableAname)); - handler.createTable(tableBname, new ArrayList()); + handler.createTable(tableBname, getColumnDescriptors()); + assertTrue(handler.isTableEnabled(tableBname)); assertEquals(handler.getTableNames().size(), 2); } diff --git a/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index c5dda2f..2e02082 100644 --- a/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -57,12 +57,13 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testCreate"); HTableDescriptor htd = new HTableDescriptor("testCreate"); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - assertTrue(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); - assertFalse(FSTableDescriptors.createTableDescriptor(fs, testdir, htd)); + FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir); + assertTrue(fstd.createTableDescriptor(fs, testdir, htd)); + assertFalse(fstd.createTableDescriptor(fs, testdir, htd)); FileStatus [] statuses = fs.listStatus(testdir); assertTrue("statuses.length="+statuses.length, statuses.length == 1); for (int i = 0; i < 10; i++) { - FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + fstd.updateHTableDescriptor(fs, testdir, htd); } statuses = fs.listStatus(testdir); assertTrue(statuses.length == 1); @@ -76,17 +77,18 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo"); HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo"); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); - int i0 = FSTableDescriptors.getTableInfoSequenceid(p0); - Path p1 = 
FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir); + Path p0 = fstd.updateHTableDescriptor(fs, testdir, htd); + int i0 = fstd.getTableInfoSequenceid(p0); + Path p1 = fstd.updateHTableDescriptor(fs, testdir, htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p0)); - int i1 = FSTableDescriptors.getTableInfoSequenceid(p1); + int i1 = fstd.getTableInfoSequenceid(p1); assertTrue(i1 == i0 + 1); - Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + Path p2 = fstd.updateHTableDescriptor(fs, testdir, htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p1)); - int i2 = FSTableDescriptors.getTableInfoSequenceid(p2); + int i2 = fstd.getTableInfoSequenceid(p2); assertTrue(i2 == i1 + 1); } @@ -183,7 +185,7 @@ public class TestFSTableDescriptors { for (int i = 0; i < count; i++) { HTableDescriptor htd = new HTableDescriptor(name + i); htd.addFamily(new HColumnDescriptor("" + i)); - FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd); + htds.updateHTableDescriptor(fs, rootdir, htd); } // Wait a while so mod time we write is for sure different. Thread.sleep(100); -- 1.8.4
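Not part of the patch itself: below is a minimal usage sketch of the caching behavior this change introduces, assuming only the constructor and methods added above (FSTableDescriptors(fs, rootdir, fsreadOnly, usecache), setCacheOn(), get(), getAll()). The standalone main(), the local Configuration, and the table name "exampleTable" are illustrative stand-ins, not code from HBase.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class TableDescriptorCacheSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path rootDir = new Path(conf.get("hbase.rootdir", "/hbase"));

    // Read-write instance with the cache enabled, roughly what the master builds.
    FSTableDescriptors htds = new FSTableDescriptors(fs, rootDir, false, true);

    // The master clears and (re)enables the cache explicitly during initialization...
    htds.setCacheOn();

    // ...and, when hbase.master.preload.tabledescriptors is true (the default),
    // warms it by reading every table descriptor once at startup.
    if (conf.getBoolean("hbase.master.preload.tabledescriptors", true)) {
      htds.getAll();  // walks the table directories and fills the cache
    }

    // Later lookups for a cached table are answered from memory; no .tableinfo
    // read or modification-time check against the NameNode is needed.
    HTableDescriptor htd = htds.get("exampleTable");  // hypothetical table name
    System.out.println(htd == null ? "not found" : htd.getNameAsString());

    // A region server constructs the class with usecache=false (read-only, no cache),
    // so it still goes to the filesystem on every lookup.
    FSTableDescriptors rsHtds = new FSTableDescriptors(fs, rootDir, true, false);
    rsHtds.get("exampleTable");
  }
}

The design trade-off, as reflected in the diff: the old TableDescriptorModtime bookkeeping paid one or two NameNode round trips per get() to validate directory and tableinfo modification times, while the new code keeps a simple write-through map -- get() fills it, updateHTableDescriptor() and remove() keep it in step, and once fsvisited records that every table directory has been read, getAll() can answer entirely from the cache.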