diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index a0ab484..8403dc0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -1350,49 +1350,6 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
         new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
   }
 
-  /** Table descriptor for hbase:meta catalog table
-   * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
-   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
-   */
-  @Deprecated
-  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
-      TableName.META_TABLE_NAME,
-      new HColumnDescriptor[] {
-          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
-              // Ten is arbitrary number. Keep versions to help debugging.
-              .setMaxVersions(10)
-              .setInMemory(true)
-              .setBlocksize(8 * 1024)
-              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-              // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
-              .setBloomFilterType(BloomType.NONE)
-              // Enable cache of data blocks in L1 if more than one caching tier deployed:
-              // e.g. if using CombinedBlockCache (BucketCache).
-              .setCacheDataInL1(true),
-          new HColumnDescriptor(HConstants.TABLE_FAMILY)
-              // Ten is arbitrary number. Keep versions to help debugging.
-              .setMaxVersions(10)
-              .setInMemory(true)
-              .setBlocksize(8 * 1024)
-              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-              // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
-              .setBloomFilterType(BloomType.NONE)
-              // Enable cache of data blocks in L1 if more than one caching tier deployed:
-              // e.g. if using CombinedBlockCache (BucketCache).
-              .setCacheDataInL1(true)
-      });
-
-  static {
-    try {
-      META_TABLEDESC.addCoprocessor(
-          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
-          null, Coprocessor.PRIORITY_SYSTEM, null);
-    } catch (IOException ex) {
-      //LOG.warn("exception in loading coprocessor for the hbase:meta table");
-      throw new RuntimeException(ex);
-    }
-  }
-
   public final static String NAMESPACE_FAMILY_INFO = "info";
   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 4ad9eac..cb410d1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -561,9 +561,6 @@ public class HTable implements HTableInterface {
   public HTableDescriptor getTableDescriptor() throws IOException {
     // TODO: This is the same as HBaseAdmin.getTableDescriptor(). Only keep one.
     if (tableName == null) return null;
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      return HTableDescriptor.META_TABLEDESC;
-    }
     HTableDescriptor htd = executeMasterCallable(
       new MasterCallable<HTableDescriptor>(getConnection()) {
       @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 19e251a..68c73ae 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1006,9 +1006,7 @@ public final class HConstants {
 
   /** Directories that are not HBase user table directories */
   public static final List<String> HBASE_NON_USER_TABLE_DIRS =
-    Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
-      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
-      HBASE_NON_TABLE_DIRS.toArray())));
+    Collections.unmodifiableList(HBASE_NON_TABLE_DIRS);
 
   /** Health script related settings. */
   public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 78e4c11..a3f466c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -466,12 +466,23 @@ public class MasterFileSystem {
     }
 
     // Create tableinfo-s for hbase:meta if not already there.
-    // assume, created table descriptor is for enabling table
-    // meta table is a system table, so descriptors are predefined,
-    // we should get them from registry.
+
+    // The meta table descriptor is stored in HDFS, but we need to be sure
+    // that it reflects all recent changes and actually matches what the
+    // current config produces. So we overwrite the stored descriptor here
+    // if it differs from what we expect.
     FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
-    fsd.createTableDescriptor(
-      new TableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
+    HTableDescriptor compiledMetaDescriptor = TableDescriptor.metaTableDescriptor(c);
+    boolean exists = !fsd.createTableDescriptor(compiledMetaDescriptor);
+    if (exists) {
+      HTableDescriptor storedMetaDescriptor = fsd.get(compiledMetaDescriptor.getTableName());
+      if (!storedMetaDescriptor.equals(compiledMetaDescriptor)) {
+        LOG.warn(
+          "META table descriptor differs from the one produced from the config; the META " +
+          "table descriptor will be overwritten.");
+        fsd.add(compiledMetaDescriptor);
+      }
+    }
 
     return rd;
   }
@@ -511,7 +522,7 @@ public class MasterFileSystem {
     // not make it in first place. Turn off block caching for bootstrap.
     // Enable after.
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    HTableDescriptor metaDescriptor = TableDescriptor.metaTableDescriptor(c);
     setInfoFamilyCachingForMeta(metaDescriptor, false);
     HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null);
     setInfoFamilyCachingForMeta(metaDescriptor, true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index cce37d7..8283be8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -92,11 +92,6 @@ public class FSTableDescriptors implements TableDescriptors {
     new ConcurrentHashMap<TableName, TableDescriptor>();
 
   /**
-   * Table descriptor for hbase:meta catalog table
-   */
-  private final HTableDescriptor metaTableDescritor;
-
-  /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
    * conf and the filesystem where that root dir lives.
    * This instance can do write operations (is not read only).
@@ -121,8 +116,6 @@ public class FSTableDescriptors implements TableDescriptors {
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-
-    this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf);
   }
 
   public void setCacheOn() throws IOException {
@@ -151,11 +144,8 @@ public class FSTableDescriptors implements TableDescriptors {
   public TableDescriptor getDescriptor(final TableName tablename)
   throws IOException {
     invocations++;
-    if (TableName.META_TABLE_NAME.equals(tablename)) {
-      cachehits++;
-      return new TableDescriptor(metaTableDescritor);
-    }
-    // hbase:meta is already handled. If some one tries to get the descriptor for
+
+    // If someone tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
       throw new IOException("No descriptor found for non table = " + tablename);
@@ -194,11 +184,8 @@ public class FSTableDescriptors implements TableDescriptors {
    * to see if a newer file has been created since the cached one was read.
    */
   @Override
+  @Nullable
   public HTableDescriptor get(TableName tableName) throws IOException {
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      cachehits++;
-      return metaTableDescritor;
-    }
     TableDescriptor descriptor = getDescriptor(tableName);
     return descriptor == null ?
        null : descriptor.getHTableDescriptor();
   }
 
@@ -215,9 +202,6 @@ public class FSTableDescriptors implements TableDescriptors {
       for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
         tds.put(entry.getKey().toString(), entry.getValue());
       }
-      // add hbase:meta to the response
-      tds.put(this.metaTableDescritor.getNameAsString(),
-        new TableDescriptor(metaTableDescritor));
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
@@ -289,9 +273,6 @@ public class FSTableDescriptors implements TableDescriptors {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
     TableName tableName = htd.getHTableDescriptor().getTableName();
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      throw new NotImplementedException();
-    }
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
       throw new NotImplementedException(
         "Cannot add a table descriptor for a reserved subdirectory name: "
@@ -310,9 +291,6 @@ public class FSTableDescriptors implements TableDescriptors {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
     TableName tableName = htd.getTableName();
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      throw new NotImplementedException();
-    }
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) {
       throw new NotImplementedException(
         "Cannot add a table descriptor for a reserved subdirectory name: "
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 28bae6a..bc37809 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1293,7 +1293,7 @@ public class HBaseFsck extends Configured implements Closeable {
     Path rootdir = FSUtils.getRootDir(getConf());
     Configuration c = getConf();
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    HTableDescriptor metaDescriptor = TableDescriptor.metaTableDescriptor(c);
     MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
     // The WAL subsystem will use the default rootDir rather than the passed in rootDir
     // unless I pass along via the conf.
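Editor's note (not part of the patch): the MasterFileSystem hunk above is the heart of this change — the master now compiles the hbase:meta descriptor from the live Configuration and reconciles the copy stored in HDFS against it at startup. Below is a minimal sketch of that flow, assuming that FSTableDescriptors#createTableDescriptor returns true only when it actually wrote a new descriptor file; the helper name ensureMetaDescriptor is hypothetical.

    // Sketch under the assumptions above; not part of the patch.
    // Imports assumed: org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.*,
    // org.apache.hadoop.hbase.*, org.apache.hadoop.hbase.util.*, java.io.IOException
    void ensureMetaDescriptor(Configuration conf) throws IOException {
      FileSystem fs = FileSystem.get(conf);
      Path rootDir = FSUtils.getRootDir(conf);
      FSTableDescriptors fsd = new FSTableDescriptors(conf, fs, rootDir);
      // Descriptor compiled from the current config -- the source of truth.
      HTableDescriptor compiled = TableDescriptor.metaTableDescriptor(conf);
      if (!fsd.createTableDescriptor(compiled)) {
        // A false return means a descriptor file already exists, so reconcile it.
        HTableDescriptor stored = fsd.get(TableName.META_TABLE_NAME);
        if (!stored.equals(compiled)) {
          fsd.add(compiled); // overwrite the stale on-disk copy
        }
      }
    }

Note the negation: since createTableDescriptor reports "created", not "already existed", the reconcile branch must run when it returns false — which is why the patched MasterFileSystem code reads boolean exists = !fsd.createTableDescriptor(...).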
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 6002f29..fd152d0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.util.GenericOptionsParser;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
index 8ea6178..38d904a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -640,9 +640,8 @@ public abstract class HBaseTestCase extends TestCase {
    * @throws IOException
    */
   protected void createMetaRegion() throws IOException {
-    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf);
     meta = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-      conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
+      conf, TableDescriptor.metaTableDescriptor(conf));
   }
 
   protected void closeRootAndMeta() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 61d5ba9..c331611 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -399,7 +399,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    */
   public HTableDescriptor getMetaTableDescriptor() {
     try {
-      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+      return TableDescriptor.metaTableDescriptor(conf);
     } catch (IOException e) {
       throw new RuntimeException("Unable to create META table descriptor", e);
     }
@@ -1164,6 +1164,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     FSUtils.setRootDir(this.conf, hbaseRootdir);
     fs.mkdirs(hbaseRootdir);
     FSUtils.setVersion(fs, hbaseRootdir);
+    new FSTableDescriptors(conf).createTableDescriptor(TableDescriptor.metaTableDescriptor(conf));
     return hbaseRootdir;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java
index 0534643..8f1b5eb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.util.HashSet;
@@ -30,8 +31,11 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogTask;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -116,4 +120,24 @@ public class TestMasterFileSystem {
     zkw.close();
   }
 
+  @Test
+  public void testMetaOverwriteIfDiffers() throws Exception {
+    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
+
+    TableDescriptors tableDescriptors = master.getTableDescriptors();
+    HTableDescriptor mtd = tableDescriptors.get(TableName.META_TABLE_NAME);
+    // force the parameters to differ from what is expected
+    mtd.setCompactionEnabled(false);
+    mtd.setRegionReplication(55);
+    tableDescriptors.add(mtd);
+
+    UTIL.getHBaseCluster().stopMaster(master.getServerName());
+    UTIL.getHBaseCluster().waitForMasterToStop(master.getServerName(), 10000);
+    HMaster master2 = UTIL.getHBaseCluster().startMaster().getMaster();
+    UTIL.getHBaseCluster().waitForActiveAndReadyMaster(10000);
+    HTableDescriptor mtd2 = master2.getTableDescriptors().get(TableName.META_TABLE_NAME);
+    assertNotEquals(mtd, mtd2);
+    assertNotEquals(55, mtd2.getRegionReplication());
+  }
+
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 6c8c77a..a5ea1e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -34,16 +34,17 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableDescriptor;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -73,10 +74,11 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = testDir;
     // Up flush size else we bind up when we use default catalog flush of 16k.
-    fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024);
+    HTableDescriptor metaHtd = TableDescriptor.metaTableDescriptor(conf);
+    metaHtd.setMemStoreFlushSize(64 * 1024 * 1024);
 
     HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
-      rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
+      rootdir, this.conf, metaHtd);
     try {
       // Write rows for three tables 'A', 'B', and 'C'.
       for (char c = 'A'; c < 'D'; c++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index 5fde726..df3ea8f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -18,35 +18,34 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.protobuf.ByteString;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestHRegionInfo {
@@ -64,14 +63,14 @@ public class TestHRegionInfo {
     HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
     Path basedir = htu.getDataTestDir();
     // Create a region. That'll write the .regioninfo file.
-    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
+    HTableDescriptor metaHtd = TableDescriptor.metaTableDescriptor(htu.getConfiguration());
     HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(),
-      fsTableDescriptors.get(TableName.META_TABLE_NAME));
+      metaHtd);
     // Get modtime on the file.
     long modtime = getModTime(r);
     HBaseTestingUtility.closeRegionAndWAL(r);
     Thread.sleep(1001);
-    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
+    r = HRegion.openHRegion(basedir, hri, metaHtd,
       null, htu.getConfiguration());
     // Ensure the file is not written for a second time.
     long modtime2 = getModTime(r);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index c09982e..4e5efe0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -301,7 +301,7 @@ public class TestFSTableDescriptors {
       htds.createTableDescriptor(htd);
     }
     // add hbase:meta
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
+    HTableDescriptor htd = TableDescriptor.metaTableDescriptor(UTIL.getConfiguration());
     htds.createTableDescriptor(htd);
 
     assertEquals("getAll() didn't return all TableDescriptors, expected: " +
@@ -336,11 +336,11 @@ public class TestFSTableDescriptors {
     assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
 
     // add a new entry for hbase:meta
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
+    HTableDescriptor htd = TableDescriptor.metaTableDescriptor(UTIL.getConfiguration());
     nonchtds.createTableDescriptor(htd);
 
-    // hbase:meta will only increase the cachehit by 1
-    assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
+    // the new hbase:meta entry should be visible to the uncached instance only
+    assertEquals(chtds.getAll().size() + 1, nonchtds.getAll().size());
 
     for (Map.Entry<String, HTableDescriptor> entry: nonchtds.getAll().entrySet()) {
       String t = (String) entry.getKey();
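Editor's note (not part of the patch): with HTableDescriptor.META_TABLEDESC removed, callers can no longer read the meta table descriptor as a compile-time constant; as the removed deprecation javadoc already advised, they should fetch it from the cluster instead. A minimal sketch against the 1.x client API shown elsewhere in this patch (HTable/HTableInterface era); the helper name fetchMetaDescriptor is hypothetical and error handling is reduced to the essentials.

    // Sketch; assumes a running cluster reachable through conf.
    // Imports assumed: org.apache.hadoop.conf.Configuration, org.apache.hadoop.hbase.*,
    // org.apache.hadoop.hbase.client.*, java.io.IOException
    HTableDescriptor fetchMetaDescriptor(Configuration conf) throws IOException {
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        // Replaces the removed HTableDescriptor.META_TABLEDESC constant.
        return admin.getTableDescriptor(TableName.META_TABLE_NAME);
      }
    }

Server-side code that cannot reach a master (bootstrap, fsck, tests) uses TableDescriptor.metaTableDescriptor(conf) directly, as the hunks above do; the two paths agree because the master reconciles the stored descriptor with the compiled one at startup.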