diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index 51352bb..e3d8365 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -1314,49 +1314,6 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
         new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
   }
 
-  /** Table descriptor for hbase:meta catalog table
-   * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or
-   * Admin#getTableDescriptor(TableName.META_TABLE) instead.
-   */
-  @Deprecated
-  public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
-      TableName.META_TABLE_NAME,
-      new HColumnDescriptor[] {
-          new HColumnDescriptor(HConstants.CATALOG_FAMILY)
-              // Ten is arbitrary number. Keep versions to help debugging.
-              .setMaxVersions(10)
-              .setInMemory(true)
-              .setBlocksize(8 * 1024)
-              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-              // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
-              .setBloomFilterType(BloomType.NONE)
-              // Enable cache of data blocks in L1 if more than one caching tier deployed:
-              // e.g. if using CombinedBlockCache (BucketCache).
-              .setCacheDataInL1(true),
-          new HColumnDescriptor(HConstants.TABLE_FAMILY)
-              // Ten is arbitrary number. Keep versions to help debugging.
-              .setMaxVersions(10)
-              .setInMemory(true)
-              .setBlocksize(8 * 1024)
-              .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-              // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore.
-              .setBloomFilterType(BloomType.NONE)
-              // Enable cache of data blocks in L1 if more than one caching tier deployed:
-              // e.g. if using CombinedBlockCache (BucketCache).
-              .setCacheDataInL1(true)
-      });
-
-  static {
-    try {
-      META_TABLEDESC.addCoprocessor(
-          "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
-          null, Coprocessor.PRIORITY_SYSTEM, null);
-    } catch (IOException ex) {
-      //LOG.warn("exception in loading coprocessor for the hbase:meta table");
-      throw new RuntimeException(ex);
-    }
-  }
-
   public final static String NAMESPACE_FAMILY_INFO = "info";
   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index d11eadd..a3edfaa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -560,9 +560,6 @@ public class HTable implements HTableInterface {
   public HTableDescriptor getTableDescriptor() throws IOException {
     // TODO: This is the same as HBaseAdmin.getTableDescriptor(). Only keep one.
     if (tableName == null) return null;
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      return HTableDescriptor.META_TABLEDESC;
-    }
     HTableDescriptor htd = executeMasterCallable(
       new MasterCallable<HTableDescriptor>(getConnection()) {
       @Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 40f67f3..2b6bd58 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1002,9 +1002,7 @@ public final class HConstants {
 
   /** Directories that are not HBase user table directories */
   public static final List<String> HBASE_NON_USER_TABLE_DIRS =
-    Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
-      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
-      HBASE_NON_TABLE_DIRS.toArray())));
+    Collections.unmodifiableList(HBASE_NON_TABLE_DIRS);
 
   /** Health script related settings. */
   public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 78e4c11..d1dc831 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -471,7 +471,7 @@ public class MasterFileSystem {
       // we should get them from registry.
       FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
       fsd.createTableDescriptor(
-        new TableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
+        new TableDescriptor(TableDescriptor.metaTableDescriptor(c)));
 
     return rd;
   }
@@ -511,7 +511,7 @@ public class MasterFileSystem {
     // not make it in first place. Turn off block caching for bootstrap.
     // Enable after.
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    HTableDescriptor metaDescriptor = TableDescriptor.metaTableDescriptor(c);
     setInfoFamilyCachingForMeta(metaDescriptor, false);
     HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null);
     setInfoFamilyCachingForMeta(metaDescriptor, true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index cce37d7..5b16fa8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -92,11 +92,6 @@ public class FSTableDescriptors implements TableDescriptors {
     new ConcurrentHashMap<TableName, TableDescriptor>();
 
   /**
-   * Table descriptor for hbase:meta catalog table
-   */
-  private final HTableDescriptor metaTableDescritor;
-
-  /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
    * conf and the filesystem where that root dir lives.
    * This instance can do write operations (is not read only).
@@ -121,8 +116,6 @@ public class FSTableDescriptors implements TableDescriptors {
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-
-    this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf);
   }
 
   public void setCacheOn() throws IOException {
@@ -151,11 +144,8 @@ public class FSTableDescriptors implements TableDescriptors {
   public TableDescriptor getDescriptor(final TableName tablename)
   throws IOException {
     invocations++;
-    if (TableName.META_TABLE_NAME.equals(tablename)) {
-      cachehits++;
-      return new TableDescriptor(metaTableDescritor);
-    }
-    // hbase:meta is already handled. If some one tries to get the descriptor for
+
+    // If someone tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
       throw new IOException("No descriptor found for non table = " + tablename);
@@ -195,10 +185,6 @@ public class FSTableDescriptors implements TableDescriptors {
    */
   @Override
   public HTableDescriptor get(TableName tableName) throws IOException {
-    if (TableName.META_TABLE_NAME.equals(tableName)) {
-      cachehits++;
-      return metaTableDescritor;
-    }
     TableDescriptor descriptor = getDescriptor(tableName);
     return descriptor == null ? null : descriptor.getHTableDescriptor();
   }
@@ -215,9 +201,6 @@ public class FSTableDescriptors implements TableDescriptors {
       for (Map.Entry<TableName, TableDescriptor> entry: this.cache.entrySet()) {
         tds.put(entry.getKey().toString(), entry.getValue());
       }
-      // add hbase:meta to the response
-      tds.put(this.metaTableDescritor.getNameAsString(),
-        new TableDescriptor(metaTableDescritor));
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index a8b60cd..df84bac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1229,7 +1229,7 @@ public class HBaseFsck extends Configured implements Closeable {
     Path rootdir = FSUtils.getRootDir(getConf());
     Configuration c = getConf();
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    HTableDescriptor metaDescriptor = TableDescriptor.metaTableDescriptor(c);
     MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
     // The WAL subsystem will use the default rootDir rather than the passed in rootDir
     // unless I pass along via the conf.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
index e4dc09e..0b5f74a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -639,9 +639,8 @@ public abstract class HBaseTestCase extends TestCase {
    * @throws IOException
    */
   protected void createMetaRegion() throws IOException {
-    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf);
     meta = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-        conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
+        conf, TableDescriptor.metaTableDescriptor(conf));
   }
 
   protected void closeRootAndMeta() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 0ef7887..54afd10 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -399,7 +399,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    */
   public HTableDescriptor getMetaTableDescriptor() {
     try {
-      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+      return TableDescriptor.metaTableDescriptor(conf);
     } catch (IOException e) {
       throw new RuntimeException("Unable to create META table descriptor", e);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 0d7820f..ef5a979 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.TableName;
@@ -72,10 +73,11 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = testDir;
     // Up flush size else we bind up when we use default catalog flush of 16k.
-    fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024);
+    HTableDescriptor metaHtd = TableDescriptor.metaTableDescriptor(conf);
+    metaHtd.setMemStoreFlushSize(64 * 1024 * 1024);
 
     HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO,
-      rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
+      rootdir, this.conf, metaHtd);
     try {
       // Write rows for three tables 'A', 'B', and 'C'.
       for (char c = 'A'; c < 'D'; c++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index 5fde726..df3ea8f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -18,35 +18,34 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.protobuf.ByteString;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestHRegionInfo {
@@ -64,14 +63,14 @@ public class TestHRegionInfo {
     HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
     Path basedir = htu.getDataTestDir();
     // Create a region. That'll write the .regioninfo file.
-    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
+    HTableDescriptor metaHtd = TableDescriptor.metaTableDescriptor(htu.getConfiguration());
     HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(),
-        fsTableDescriptors.get(TableName.META_TABLE_NAME));
+        metaHtd);
     // Get modtime on the file.
     long modtime = getModTime(r);
     HBaseTestingUtility.closeRegionAndWAL(r);
     Thread.sleep(1001);
-    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
+    r = HRegion.openHRegion(basedir, hri, metaHtd,
       null, htu.getConfiguration());
     // Ensure the file is not written for a second time.
     long modtime2 = getModTime(r);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index c09982e..4e5efe0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -301,7 +301,7 @@ public class TestFSTableDescriptors {
       htds.createTableDescriptor(htd);
     }
     // add hbase:meta
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
+    HTableDescriptor htd = TableDescriptor.metaTableDescriptor(UTIL.getConfiguration());
     htds.createTableDescriptor(htd);
 
     assertEquals("getAll() didn't return all TableDescriptors, expected: " +
@@ -336,11 +336,11 @@ public class TestFSTableDescriptors {
     assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
 
     // add a new entry for hbase:meta
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName());
+    HTableDescriptor htd = TableDescriptor.metaTableDescriptor(UTIL.getConfiguration());
     nonchtds.createTableDescriptor(htd);
 
-    // hbase:meta will only increase the cachehit by 1
-    assertTrue(nonchtds.getAll().size() == chtds.getAll().size());
+    // hbase:meta should appear
+    assertEquals(nonchtds.getAll().size(), chtds.getAll().size() + 1);
 
     for (Map.Entry entry: nonchtds.getAll().entrySet()) {
       String t = (String) entry.getKey();
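
Migration note: with HTableDescriptor.META_TABLEDESC removed, callers have two remaining ways to
obtain the hbase:meta descriptor. A minimal sketch of both paths follows; the helper class and
method names are illustrative only and not part of the patch, and the client path assumes an
already-open Connection.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Illustrative helper, not part of the patch.
    public class MetaDescriptorExample {
      // Server-side/offline path: build the descriptor from configuration,
      // as MasterFileSystem and HBaseFsck do after this change.
      static HTableDescriptor fromConf(Configuration conf) throws IOException {
        return TableDescriptor.metaTableDescriptor(conf);
      }

      // Client-side path: fetch the descriptor from the master, per the
      // deprecation note that META_TABLEDESC carried.
      static HTableDescriptor fromMaster(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          return admin.getTableDescriptor(TableName.META_TABLE_NAME);
        }
      }
    }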