diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java index 8e23f97..a5db381 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.hbase; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.compress.Compression.Algorithm; @@ -27,16 +27,20 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.BuilderStyleTest; -import org.junit.experimental.categories.Category; import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; /** Tests the HColumnDescriptor with appropriate arguments */ @Category(SmallTests.class) public class TestHColumnDescriptor { @Test - public void testPb() throws DeserializationException { - HColumnDescriptor hcd = new HColumnDescriptor( - HTableDescriptor.META_TABLEDESC.getColumnFamilies()[0]); + public void testPb() throws DeserializationException, IOException { + HTableDescriptor metaTableDescriptor = + HTableDescriptor.metaTableDescriptor(new Configuration()); + HColumnDescriptor hcd = new HColumnDescriptor(metaTableDescriptor.getColumnFamilies()[0]); final int v = 123; hcd.setBlocksize(v); hcd.setTimeToLive(v); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java index 8dc141b..f80d4ad 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java @@ -27,6 +27,7 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -44,7 +45,7 @@ public class TestHTableDescriptor { @Test public void testPb() throws DeserializationException, IOException { - HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC); + HTableDescriptor htd = HTableDescriptor.metaTableDescriptor(new Configuration()); final int v = 123; htd.setMaxFileSize(v); htd.setDurability(Durability.ASYNC_WAL); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 686ff15..830f1ef 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -984,9 +984,7 @@ public final class HConstants { /** Directories that are not HBase user table directories */ public static final List HBASE_NON_USER_TABLE_DIRS = - Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll( - new String[] { TableName.META_TABLE_NAME.getNameAsString() }, - HBASE_NON_TABLE_DIRS.toArray()))); + Collections.unmodifiableList(HBASE_NON_TABLE_DIRS); /** Health script related settings. 
*/ public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 93bde36..48f3e2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -29,14 +29,12 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.ClusterId; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; @@ -45,17 +43,19 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException; import org.apache.hadoop.hbase.RemoteExceptionHandler; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.HFileArchiver; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.wal.DefaultWALProvider; -import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import 
org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; +import org.apache.hadoop.hbase.wal.WALSplitter; /** * This class abstracts a bunch of operations the HMaster needs to interact with @@ -465,11 +465,22 @@ public class MasterFileSystem { // Create tableinfo-s for hbase:meta if not already there. - // meta table is a system table, so descriptors are predefined, - // we should get them from registry. + // The meta table descriptor is stored in HDFS. But we need to + // be sure that it has all recent changes, and that it is actually + // produced from the config. So what we can do here is + // overwrite it if it differs from what we are expecting. FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd); - fsd.createTableDescriptor( - new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME))); + HTableDescriptor compiledMetaDescriptor = HTableDescriptor.metaTableDescriptor(c); + boolean exists = fsd.createTableDescriptor(compiledMetaDescriptor); + if (exists) { + HTableDescriptor storedMetaDescriptor = fsd.get(compiledMetaDescriptor.getTableName()); + if (!storedMetaDescriptor.equals(compiledMetaDescriptor)) { + LOG.warn( + "META table descriptor differs from what produced from config, META table " + + "descriptor will be overwritten."); + fsd.add(compiledMetaDescriptor); + } + } return rd; } @@ -509,7 +520,7 @@ public class MasterFileSystem { // not make it in first place. Turn off block caching for bootstrap. // Enable after. 
HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO); - HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME); + HTableDescriptor metaDescriptor = HTableDescriptor.metaTableDescriptor(c); setInfoFamilyCachingForMeta(metaDescriptor, false); HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor); setInfoFamilyCachingForMeta(metaDescriptor, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 7cd2673..349439b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -91,11 +91,6 @@ public class FSTableDescriptors implements TableDescriptors { private final Map cache = new ConcurrentHashMap(); - /** - * Table descriptor for hbase:meta catalog table - */ - private final HTableDescriptor metaTableDescriptor; - /** * Construct a FSTableDescriptors instance using the hbase root dir of the given * conf and the filesystem where that root dir lives. @@ -121,7 +116,6 @@ public class FSTableDescriptors implements TableDescriptors { this.rootdir = rootdir; this.fsreadonly = fsreadonly; this.usecache = usecache; - this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf); } public void setCacheOn() throws IOException { @@ -149,11 +143,8 @@ public class FSTableDescriptors implements TableDescriptors { public HTableDescriptor get(final TableName tablename) throws IOException { invocations++; - if (TableName.META_TABLE_NAME.equals(tablename)) { - cachehits++; - return metaTableDescriptor; - } - // hbase:meta is already handled. If some one tries to get the descriptor for + + // If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. 
if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) { throw new IOException("No descriptor found for non table = " + tablename); @@ -197,9 +188,6 @@ public class FSTableDescriptors implements TableDescriptors { for (Map.Entry entry: this.cache.entrySet()) { htds.put(entry.getKey().toString(), entry.getValue()); } - // add hbase:meta to the response - htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(), - HTableDescriptor.META_TABLEDESC); } else { LOG.debug("Fetching table descriptors from the filesystem."); boolean allvisited = true; @@ -255,9 +243,6 @@ public class FSTableDescriptors implements TableDescriptors { if (fsreadonly) { throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); } - if (TableName.META_TABLE_NAME.equals(htd.getTableName())) { - throw new NotImplementedException(); - } if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) { throw new NotImplementedException( "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index dcbb8f1..351d9c2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -1300,7 +1300,7 @@ public class HBaseFsck extends Configured implements Closeable { Path rootdir = FSUtils.getRootDir(getConf()); Configuration c = getConf(); HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO); - HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME); + HTableDescriptor metaDescriptor = HTableDescriptor.metaTableDescriptor(c); MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false); HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor); 
MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index 596cfaa..1fd8ebf 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -640,9 +640,8 @@ public abstract class HBaseTestCase extends TestCase { * @throws IOException */ protected void createMetaRegion() throws IOException { - FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf); meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir, conf, - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + HTableDescriptor.metaTableDescriptor(conf)); } protected void closeRootAndMeta() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 04f3676..7d7a043 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -385,7 +385,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public HTableDescriptor getMetaTableDescriptor() { try { - return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME); + return HTableDescriptor.metaTableDescriptor(conf); } catch (IOException e) { throw new RuntimeException("Unable to create META table descriptor", e); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java index dae361d..6dc5060 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java @@ 
-17,10 +17,6 @@ */ package org.apache.hadoop.hbase.master; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - import java.util.HashSet; import java.util.Set; @@ -30,10 +26,14 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogTask; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.JVMClusterUtil; import org.apache.hadoop.hbase.zookeeper.ZKSplitLog; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; @@ -44,6 +44,11 @@ import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertTrue; + /** * Test the master filesystem in a local cluster */ @@ -115,4 +120,23 @@ public class TestMasterFileSystem { zkw.close(); } + @Test + public void testMetaOverwriteIfDiffers() throws Exception { + HMaster master = UTIL.getMiniHBaseCluster().getMaster(); + + TableDescriptors tableDescriptors = master.getTableDescriptors(); + HTableDescriptor mtd = tableDescriptors.get(TableName.META_TABLE_NAME); + // forcing parameter to differ from what is expected + mtd.setCompactionEnabled(false); + mtd.setRegionReplication(55); + tableDescriptors.add(mtd); + + UTIL.getHBaseCluster().stopMaster(master.getServerName()); + 
UTIL.getHBaseCluster().waitForMasterToStop(master.getServerName(), 10000); + HMaster master2 = UTIL.getHBaseCluster().startMaster().getMaster(); + UTIL.getHBaseCluster().waitForActiveAndReadyMaster(10000); + HTableDescriptor mtd2 = master2.getTableDescriptors().get(TableName.META_TABLE_NAME); + assertNotEquals(mtd, mtd2); + assertNotEquals(55, mtd2.getRegionReplication()); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index ece908e..36074c2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -24,7 +24,6 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; @@ -66,12 +65,12 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { public void testUsingMetaAndBinary() throws IOException { - FileSystem filesystem = FileSystem.get(conf); Path rootdir = testDir; // Up flush size else we bind up when we use default catalog flush of 16k. - fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024); + HTableDescriptor metaHtd = HTableDescriptor.metaTableDescriptor(conf); + metaHtd.setMemStoreFlushSize(64 * 1024 * 1024); HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, - rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); + rootdir, this.conf, metaHtd); try { // Write rows for three tables 'A', 'B', and 'C'. 
for (char c = 'A'; c < 'D'; c++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index e29bef8..67520bd 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -18,31 +18,30 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.IOException; +import com.google.protobuf.ByteString; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.MD5Hash; import org.junit.Test; import org.junit.experimental.categories.Category; -import com.google.protobuf.ByteString; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; @Category(SmallTests.class) public class TestHRegionInfo { @@ -59,15 +58,15 @@ public class TestHRegionInfo { HBaseTestingUtility 
htu = new HBaseTestingUtility(); HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO; Path basedir = htu.getDataTestDir(); - FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); // Create a region. That'll write the .regioninfo file. - HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + HTableDescriptor metaHtd = + HTableDescriptor.metaTableDescriptor(htu.getConfiguration()); + HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(), metaHtd); // Get modtime on the file. long modtime = getModTime(r); HRegion.closeHRegion(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), + r = HRegion.openHRegion(basedir, hri, metaHtd, null, htu.getConfiguration()); // Ensure the file is not written for a second time. long modtime2 = getModTime(r); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java index 8727e23..503417e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hbase.regionserver.wal; -import static org.junit.Assert.assertFalse; - import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; @@ -32,8 +30,8 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; import 
org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; @@ -43,6 +41,8 @@ import org.apache.hadoop.hbase.wal.WALKey; import org.junit.Test; import org.junit.experimental.categories.Category; +import static org.junit.Assert.assertFalse; + /** * Test many concurrent appenders to an {@link #WAL} while rolling the log. */ @@ -135,8 +135,8 @@ public class TestLogRollingNoCluster { byte[] bytes = Bytes.toBytes(i); edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY)); final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO; - final FSTableDescriptors fts = new FSTableDescriptors(TEST_UTIL.getConfiguration()); - final HTableDescriptor htd = fts.get(TableName.META_TABLE_NAME); + final HTableDescriptor htd = + HTableDescriptor.metaTableDescriptor(TEST_UTIL.getConfiguration()); final long txid = wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), TableName.META_TABLE_NAME, now), edit, sequenceId, true, null); wal.sync(txid); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index df01d71..82bf594 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -261,7 +261,7 @@ public class TestFSTableDescriptors { htds.createTableDescriptor(htd); } // add hbase:meta - HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName()); + HTableDescriptor htd = HTableDescriptor.metaTableDescriptor(UTIL.getConfiguration()); htds.createTableDescriptor(htd); assertTrue(htds.getAll().size() == count + 1); @@ -294,11 +294,11 @@ public class TestFSTableDescriptors { assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); // add a new entry for hbase:meta - HTableDescriptor htd = new 
HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName()); + HTableDescriptor htd = HTableDescriptor.metaTableDescriptor(UTIL.getConfiguration()); nonchtds.createTableDescriptor(htd); - // hbase:meta will only increase the cachehit by 1 - assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); + // hbase:meta should appear + assertTrue(nonchtds.getAll().size() == chtds.getAll().size() + 1); for (Map.Entry entry : nonchtds.getAll().entrySet()) { String t = (String) entry.getKey();