From 4f32e251b301e1e9533852594a79384dccc007be Mon Sep 17 00:00:00 2001
From: Andrew Purtell <apurtell@apache.org>
Date: Tue, 28 Oct 2014 13:50:34 -0700
Subject: [PATCH] HBASE-12205 Backport HBASE-12016 'Reduce number of versions
 in Meta table. Make it configurable' to 0.98

---
 .../org/apache/hadoop/hbase/HTableDescriptor.java  | 34 ++++++++++++++++++++--
 .../apache/hadoop/hbase/catalog/MetaReader.java    |  3 +-
 .../hadoop/hbase/client/HConnectionManager.java    |  3 --
 .../java/org/apache/hadoop/hbase/HConstants.java   | 20 +++++++++++++
 .../org/apache/hadoop/hbase/master/HMaster.java    |  2 +-
 .../hadoop/hbase/master/MasterFileSystem.java      | 22 ++++++++------
 .../hadoop/hbase/migration/NamespaceUpgrade.java   |  3 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  4 ++-
 .../hadoop/hbase/regionserver/HRegionServer.java   |  2 +-
 .../hbase/security/access/AccessController.java    | 11 ++-----
 .../hadoop/hbase/snapshot/SnapshotManifest.java    |  2 +-
 .../hadoop/hbase/util/FSTableDescriptors.java      | 25 ++++++++++------
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |  8 ++---
 .../org/apache/hadoop/hbase/util/MetaUtils.java    |  7 +++--
 .../org/apache/hadoop/hbase/HBaseTestCase.java     | 14 +++++++--
 .../apache/hadoop/hbase/HBaseTestingUtility.java   | 12 ++++++++
 .../hbase/TestFSTableDescriptorForceCreation.java  |  6 ++--
 .../apache/hadoop/hbase/TestHColumnDescriptor.java | 10 +++----
 .../apache/hadoop/hbase/TestHTableDescriptor.java  |  2 +-
 .../org/apache/hadoop/hbase/client/TestAdmin.java  |  4 +--
 .../hadoop/hbase/master/TestMasterFailover.java    |  6 ++--
 .../hbase/migration/TestNamespaceUpgrade.java      |  2 +-
 .../regionserver/TestGetClosestAtOrBefore.java     |  5 ++--
 .../hadoop/hbase/regionserver/TestHRegionInfo.java |  8 +++--
 .../regionserver/wal/TestLogRollingNoCluster.java  |  6 ++--
 .../hadoop/hbase/snapshot/TestExportSnapshot.java  |  2 +-
 .../hadoop/hbase/util/TestFSTableDescriptors.java  | 19 ++++++------
 .../apache/hadoop/hbase/util/TestHBaseFsck.java    | 16 +++++-----
 .../apache/hadoop/hbase/util/TestMergeTable.java   |  4 +--
 .../apache/hadoop/hbase/util/TestMergeTool.java    |  2 +-
 .../util/hbck/OfflineMetaRebuildTestCore.java      |  2 +-
 31 files changed, 170 insertions(+), 96 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index c32ad56..e01db67 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -39,6 +39,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
@@ -1326,15 +1327,20 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
         new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString()))));
   }
 
-  /** Table descriptor for hbase:meta catalog table */
+  /**
+   * Table descriptor for hbase:meta catalog table
+   * @deprecated Use TableDescriptors#get(TableName.META_TABLE_NAME) or
+   * HBaseAdmin#getTableDescriptor(TableName.META_TABLE_NAME) instead.
+   */
+  @Deprecated
   public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor(
       TableName.META_TABLE_NAME,
       new HColumnDescriptor[] {
           new HColumnDescriptor(HConstants.CATALOG_FAMILY)
               // Ten is arbitrary number.  Keep versions to help debugging.
-              .setMaxVersions(10)
+              .setMaxVersions(HConstants.DEFAULT_HBASE_META_VERSIONS)
               .setInMemory(true)
-              .setBlocksize(8 * 1024)
+              .setBlocksize(HConstants.DEFAULT_HBASE_META_BLOCK_SIZE)
               .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
               // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
               .setBloomFilterType(BloomType.NONE)
@@ -1505,4 +1511,26 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
   public void removeConfiguration(final String key) {
     configuration.remove(key);
   }
+
+  public static HTableDescriptor metaTableDescriptor(final Configuration conf)
+      throws IOException {
+    HTableDescriptor metaDescriptor = new HTableDescriptor(
+        TableName.META_TABLE_NAME,
+        new HColumnDescriptor[] {
+            new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+                .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                    HConstants.DEFAULT_HBASE_META_VERSIONS))
+                .setInMemory(true)
+                .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                    HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+                .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+                // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+                .setBloomFilterType(BloomType.NONE)
+        });
+    metaDescriptor.addCoprocessor(
+        "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint",
+        null, Coprocessor.PRIORITY_SYSTEM, null);
+    return metaDescriptor;
+  }
+
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
index 821b441..492af35 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/MetaReader.java
@@ -24,7 +24,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTable;
@@ -279,7 +278,7 @@ public class MetaReader {
   public static boolean tableExists(CatalogTracker catalogTracker,
       final TableName tableName)
   throws IOException {
-    if (tableName.equals(HTableDescriptor.META_TABLEDESC.getTableName())) {
+    if (tableName.equals(TableName.META_TABLE_NAME)) {
       // Catalog tables always exist.
       return true;
     }
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 4fa5a8b..2c29800 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -2651,9 +2651,6 @@ public class HConnectionManager {
     public HTableDescriptor getHTableDescriptor(final TableName tableName)
     throws IOException {
       if (tableName == null) return null;
-      if (tableName.equals(TableName.META_TABLE_NAME)) {
-        return HTableDescriptor.META_TABLEDESC;
-      }
       MasterKeepAliveConnection master = getKeepAliveMasterService();
       GetTableDescriptorsResponse htds;
       try {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 5eddd7f..6d484cb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -677,6 +677,26 @@ public final class HConstants {
   public static int DEFAULT_HBASE_CLIENT_SCANNER_CACHING = 100;
 
   /**
+   * Parameter name for the number of versions kept by the meta table.
+   */
+  public static String HBASE_META_VERSIONS = "hbase.meta.versions";
+
+  /**
+   * Default value of {@link #HBASE_META_VERSIONS}.
+   */
+  public static int DEFAULT_HBASE_META_VERSIONS = 10;
+
+  /**
+   * Parameter name for the block size of the meta table.
+   */
+  public static String HBASE_META_BLOCK_SIZE = "hbase.meta.blocksize";
+
+  /**
+   * Default value of {@link #HBASE_META_BLOCK_SIZE}.
+   */
+  public static int DEFAULT_HBASE_META_BLOCK_SIZE = 8 * 1024;
+
+  /**
    * Parameter name for number of rows that will be fetched when calling next on
    * a scanner if it is not served from memory. Higher caching values will
    * enable faster scanners but will eat up more memory and some calls of next
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 4e55cb4..a3e6e7c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -803,7 +803,7 @@ MasterServices, Server {
     this.fileSystemManager = new MasterFileSystem(this, this, masterRecovery);
 
     this.tableDescriptors =
-      new FSTableDescriptors(this.fileSystemManager.getFileSystem(),
+      new FSTableDescriptors(this.conf, this.fileSystemManager.getFileSystem(),
       this.fileSystemManager.getRootDir());
 
     // publish cluster ID
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index f0a4cee..3383314 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -490,7 +490,12 @@ public class MasterFileSystem {
     }
 
     // Create tableinfo-s for hbase:meta if not already there.
-    new FSTableDescriptors(fs, rd).createTableDescriptor(HTableDescriptor.META_TABLEDESC);
+
+    // The meta table is a system table, so its descriptor is predefined;
+    // get it from the descriptor registry.
+    FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
+    fsd.createTableDescriptor(
+      new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
 
     return rd;
   }
@@ -530,10 +535,10 @@ public class MasterFileSystem {
       // not make it in first place.  Turn off block caching for bootstrap.
       // Enable after.
       HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-      setInfoFamilyCachingForMeta(false);
-      HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
-          HTableDescriptor.META_TABLEDESC, null, true, true);
-      setInfoFamilyCachingForMeta(true);
+      HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+      setInfoFamilyCachingForMeta(metaDescriptor, false);
+      HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor);
+      setInfoFamilyCachingForMeta(metaDescriptor, true);
       HRegion.closeHRegion(meta);
     } catch (IOException e) {
       e = RemoteExceptionHandler.checkIOException(e);
@@ -545,9 +550,9 @@ public class MasterFileSystem {
   /**
    * Enable in memory caching for hbase:meta
    */
-  public static void setInfoFamilyCachingForMeta(final boolean b) {
-    for (HColumnDescriptor hcd:
-        HTableDescriptor.META_TABLEDESC.getColumnFamilies()) {
+  public static void setInfoFamilyCachingForMeta(final HTableDescriptor metaDescriptor,
+      final boolean b) {
+    for (HColumnDescriptor hcd: metaDescriptor.getColumnFamilies()) {
       if (Bytes.equals(hcd.getName(), HConstants.CATALOG_FAMILY)) {
         hcd.setBlockCacheEnabled(b);
         hcd.setInMemory(b);
@@ -555,7 +560,6 @@ public class MasterFileSystem {
     }
   }
 
-
   public void deleteRegion(HRegionInfo region) throws IOException {
     HFileArchiver.archiveRegion(conf, fs, region);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
index 748f1a8..440b657 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/NamespaceUpgrade.java
@@ -384,8 +384,9 @@ public class NamespaceUpgrade implements Tool {
     HLog metaHLog = HLogFactory.createMetaHLog(fs, rootDir,
         metaLogName, conf, null,
         fakeServer.toString());
+    FSTableDescriptors fst = new FSTableDescriptors(conf);
     HRegion meta = HRegion.openHRegion(rootDir, HRegionInfo.FIRST_META_REGIONINFO,
-        HTableDescriptor.META_TABLEDESC, metaHLog, conf);
+        fst.get(TableName.META_TABLE_NAME), metaHLog, conf);
     HRegion region = null;
     try {
       for(Path regionDir : FSUtils.getRegionDirs(fs, oldTablePath)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 37accdf..6286615 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -136,6 +136,7 @@ import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HashedBytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -5721,10 +5722,11 @@ public class HRegion implements HeapSize { // , Writable{
       final boolean majorCompact)
   throws IOException {
     HRegion region = null;
+    FSTableDescriptors fst = new FSTableDescriptors(c);
     // Currently expects tables have one region only.
     if (FSUtils.getTableName(p).equals(TableName.META_TABLE_NAME)) {
       region = HRegion.newHRegion(p, log, fs, c,
-        HRegionInfo.FIRST_META_REGIONINFO, HTableDescriptor.META_TABLEDESC, null);
+        HRegionInfo.FIRST_META_REGIONINFO, fst.get(TableName.META_TABLE_NAME), null);
     } else {
       throw new IOException("Not a known catalog table: " + p.toString());
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 9fcc14c..14d9bc0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1309,7 +1309,7 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
     boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
     this.fs = new HFileSystem(this.conf, useHBaseChecksum);
     this.rootDir = FSUtils.getRootDir(this.conf);
-    this.tableDescriptors = new FSTableDescriptors(this.fs, this.rootDir, true);
+    this.tableDescriptors = new FSTableDescriptors(this.conf, this.fs, this.rootDir, true);
     this.hlog = setupWALAndReplication();
     // Init in here rather than in constructor after thread name has been set
     this.metricsRegionServer = new MetricsRegionServer(new MetricsRegionServerWrapperImpl(this));
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 40bbbad..10ae21d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -49,8 +49,6 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Append;
@@ -2197,14 +2195,9 @@ public class AccessController extends BaseMasterAndRegionObserver
     else {
       MasterServices masterServices = ctx.getEnvironment().getMasterServices();
       for (TableName tableName: tableNamesList) {
-        // Do not deny if the table does not exist
-        try {
-          masterServices.checkTableModifiable(tableName);
-        } catch (TableNotFoundException ex) {
-          // Skip checks for a table that does not exist
+        // Skip checks for a table that does not exist
+        if (masterServices.getTableDescriptors().get(tableName) == null) {
           continue;
-        } catch (TableNotDisabledException ex) {
-          // We don't care about this
         }
         requirePermission("getTableDescriptors", tableName, null, null,
           Action.ADMIN, Action.CREATE);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 4e0b75f..38ccf08 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -352,7 +352,7 @@ public class SnapshotManifest {
       Path rootDir = FSUtils.getRootDir(conf);
       LOG.info("Using old Snapshot Format");
       // write a copy of descriptor to the snapshot directory
-      new FSTableDescriptors(fs, rootDir)
+      new FSTableDescriptors(conf, fs, rootDir)
         .createTableDescriptorForTableDirectory(workingDir, htd, false);
     } else {
       LOG.debug("Convert to Single Snapshot Manifest");
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index e335752..7d8983a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -89,6 +89,11 @@ public class FSTableDescriptors implements TableDescriptors {
       new ConcurrentHashMap<TableName, TableDescriptorAndModtime>();
 
   /**
+   * Table descriptor for hbase:meta catalog table
+   */
+  private final HTableDescriptor metaTableDescriptor;
+
+  /**
    * Data structure to hold modification time and table descriptor.
    */
   private static class TableDescriptorAndModtime {
@@ -115,23 +120,25 @@ public class FSTableDescriptors implements TableDescriptors {
    * This instance can do write operations (is not read only).
    */
   public FSTableDescriptors(final Configuration conf) throws IOException {
-    this(FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
+    this(conf, FSUtils.getCurrentFileSystem(conf), FSUtils.getRootDir(conf));
   }
-
-  public FSTableDescriptors(final FileSystem fs, final Path rootdir) {
-    this(fs, rootdir, false);
+
+  public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir)
+      throws IOException {
+    this(conf, fs, rootdir, false);
   }
 
   /**
   * @param fsreadonly True if we are read-only when it comes to filesystem
   * operations; i.e. on remove, we do not do delete in fs.
   */
-  public FSTableDescriptors(final FileSystem fs,
-      final Path rootdir, final boolean fsreadonly) {
+  public FSTableDescriptors(final Configuration conf, final FileSystem fs,
+      final Path rootdir, final boolean fsreadonly) throws IOException {
     super();
     this.fs = fs;
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
+    this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
   }
 
  /**
@@ -144,9 +151,9 @@ public class FSTableDescriptors implements TableDescriptors {
   public HTableDescriptor get(final TableName tablename)
   throws IOException {
     invocations++;
-    if (HTableDescriptor.META_TABLEDESC.getTableName().equals(tablename)) {
+    if (TableName.META_TABLE_NAME.equals(tablename)) {
       cachehits++;
-      return HTableDescriptor.META_TABLEDESC;
+      return metaTableDescriptor;
     }
     // hbase:meta is already handled. If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
@@ -490,7 +497,7 @@ public class FSTableDescriptors implements TableDescriptors {
   */
  private TableDescriptorAndModtime getTableDescriptorAndModtime(TableName tableName)
  throws IOException {
-    // ignore both -ROOT- and hbase:meta tables
+    // ignore hbase:meta
    if (tableName.equals(TableName.META_TABLE_NAME)) {
      return null;
    }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 1fc8adf..ed6f7ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1175,10 +1175,10 @@ public class HBaseFsck extends Configured {
     Path rootdir = FSUtils.getRootDir(getConf());
     Configuration c = getConf();
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    MasterFileSystem.setInfoFamilyCachingForMeta(false);
-    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c,
-        HTableDescriptor.META_TABLEDESC);
-    MasterFileSystem.setInfoFamilyCachingForMeta(true);
+    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
+    HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor);
+    MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
     return meta;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
index 4ff2a94..ac3a2ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaUtils.java
@@ -32,7 +32,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
 public class MetaUtils {
   private static final Log LOG = LogFactory.getLog(MetaUtils.class);
   private final Configuration conf;
+  private final FSTableDescriptors descriptors;
   private FileSystem fs;
   private HLog log;
   private HRegion metaRegion;
@@ -69,6 +70,7 @@ public class MetaUtils {
     this.conf = conf;
     conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
     this.metaRegion = null;
+    this.descriptors = new FSTableDescriptors(conf);
     initialize();
   }
 
@@ -147,8 +149,7 @@ public class MetaUtils {
       return this.metaRegion;
     }
     this.metaRegion = HRegion.openHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-      HTableDescriptor.META_TABLEDESC, getLog(),
-      this.conf);
+      descriptors.get(TableName.META_TABLE_NAME), getLog(), this.conf);
     this.metaRegion.compactStores();
     return this.metaRegion;
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
index e08530d..d042f1c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 
@@ -73,6 +74,14 @@ public abstract class HBaseTestCase extends TestCase {
   protected final HBaseTestingUtility testUtil = new HBaseTestingUtility();
 
   public volatile Configuration conf = HBaseConfiguration.create();
+  public final FSTableDescriptors fsTableDescriptors;
+  {
+    try {
+      fsTableDescriptors = new FSTableDescriptors(conf);
+    } catch (IOException e) {
+      throw new RuntimeException("Failed to init descriptors", e);
+    }
+  }
 
   /** constructor */
   public HBaseTestCase() {
@@ -629,8 +638,9 @@ public abstract class HBaseTestCase extends TestCase {
   * @throws IOException
   */
  protected void createMetaRegion() throws IOException {
-    meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir,
-      conf, HTableDescriptor.META_TABLEDESC);
+    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf);
+    meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir, conf,
+      fsTableDescriptors.get(TableName.META_TABLE_NAME));
  }
 
  protected void closeRootAndMeta() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index cc173f1..021b43a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.tool.Canary;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
@@ -374,6 +375,17 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   }
 
   /**
+   * @return META table descriptor
+   */
+  public HTableDescriptor getMetaTableDescriptor() {
+    try {
+      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+    } catch (IOException e) {
+      throw new RuntimeException("Unable to create META table descriptor", e);
+    }
+  }
+
+  /**
   * @return Where the DFS cluster will write data on the local subsystem.
   * Creates it if it does not exist already.  A subdir of {@link #getBaseTestDir()}
   * @see #getTestFileSystem()
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
index b4085ca..906dfee 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java
@@ -39,7 +39,7 @@ public class TestFSTableDescriptorForceCreation {
     final String name = "newTable2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
 
     assertTrue("Should create new table descriptor",
@@ -52,7 +52,7 @@ public class TestFSTableDescriptorForceCreation {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detritus laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(name);
     fstd.add(htd);
     assertFalse("Should not create new table descriptor", fstd.createTableDescriptor(htd, false));
@@ -64,7 +64,7 @@ public class TestFSTableDescriptorForceCreation {
     final String name = "createNewTableNew2";
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     fstd.createTableDescriptor(htd, false);
     assertTrue("Should create new table descriptor",
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java
index 2534609..90d5842 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java
@@ -20,22 +20,22 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.junit.experimental.categories.Category;
-
 import org.junit.Test;
 
 /** Tests the HColumnDescriptor with appropriate arguments */
 @Category(SmallTests.class)
 public class TestHColumnDescriptor {
   @Test
-  public void testPb() throws DeserializationException {
-    HColumnDescriptor hcd = new HColumnDescriptor(
-        HTableDescriptor.META_TABLEDESC.getColumnFamilies()[0]);
+  public void testPb() throws Exception {
+    HColumnDescriptor hcd = new HColumnDescriptor(HConstants.CATALOG_FAMILY)
+        .setInMemory(true)
+        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+        .setBloomFilterType(BloomType.NONE);
     final int v = 123;
     hcd.setBlocksize(v);
     hcd.setTimeToLive(v);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
index 82a132f..bb107f3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
@@ -44,7 +44,7 @@ public class TestHTableDescriptor {
 
   @Test
   public void testPb() throws DeserializationException, IOException {
-    HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC);
+    HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
     final int v = 123;
     htd.setMaxFileSize(v);
     htd.setDurability(Durability.ASYNC_WAL);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
index 053a946..d6ca7be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
@@ -1142,13 +1142,13 @@ public class TestAdmin {
   public void testCreateBadTables() throws IOException {
     String msg = null;
     try {
-      this.admin.createTable(HTableDescriptor.META_TABLEDESC);
+      this.admin.createTable(new HTableDescriptor(TableName.META_TABLE_NAME));
     } catch(TableExistsException e) {
       msg = e.toString();
     }
     assertTrue("Unexcepted exception message " + msg, msg != null &&
       msg.startsWith(TableExistsException.class.getName()) &&
-      msg.contains(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString()));
+      msg.contains(TableName.META_TABLE_NAME.getNameAsString()));
 
     // Now try and do concurrent creation with a bunch of threads.
     final HTableDescriptor threadDesc =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
index 0951d27..76a440f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
@@ -201,7 +201,7 @@ public class TestMasterFailover {
 
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = FSUtils.getRootDir(conf);
-    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(conf, filesystem, rootdir);
     // Write the .tableinfo
     fstd.createTableDescriptor(htdEnabled);
 
@@ -547,7 +547,7 @@ public class TestMasterFailover {
     htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = FSUtils.getRootDir(conf);
-    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(conf, filesystem, rootdir);
     // Write the .tableinfo
     fstd.createTableDescriptor(htdEnabled);
     HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getTableName(),
@@ -1199,7 +1199,7 @@ public class TestMasterFailover {
 
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = FSUtils.getRootDir(conf);
-    FSTableDescriptors fstd = new FSTableDescriptors(filesystem, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(conf, filesystem, rootdir);
     fstd.createTableDescriptor(offlineTable);
 
     HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
index 4dcd3ab..8cdf4fa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java
@@ -305,7 +305,7 @@ public class TestNamespaceUpgrade {
     // Create a Region
     HTableDescriptor aclTable = new HTableDescriptor(TableName.valueOf("testACLTable"));
     aclTable.addFamily(new HColumnDescriptor(FAMILY));
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootDir);
+    FSTableDescriptors fstd = new FSTableDescriptors(conf, fs, rootDir);
     fstd.createTableDescriptor(aclTable);
     HRegionInfo hriAcl = new HRegionInfo(aclTable.getTableName(), null, null);
     HRegion region = HRegion.createHRegion(hriAcl, rootDir, conf, aclTable);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
index 8a5fbfe..56e1e67 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java
@@ -68,10 +68,9 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase {
     FileSystem filesystem = FileSystem.get(conf);
     Path rootdir = testDir;
     // Up flush size else we bind up when we use default catalog flush of 16k.
-    HTableDescriptor.META_TABLEDESC.setMemStoreFlushSize(64 * 1024 * 1024);
-
+    fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024);
     HRegion mr = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO,
-      rootdir, this.conf, HTableDescriptor.META_TABLEDESC);
+      rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME));
     try {
       // Write rows for three tables 'A', 'B', and 'C'.
       for (char c = 'A'; c < 'D'; c++) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index 1ee256b..d1b1229 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -53,15 +54,16 @@ public class TestHRegionInfo {
     HBaseTestingUtility htu = new HBaseTestingUtility();
     HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
     Path basedir = htu.getDataTestDir();
+    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
     // Create a region.  That'll write the .regioninfo file.
     HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(),
-      HTableDescriptor.META_TABLEDESC);
+      fsTableDescriptors.get(TableName.META_TABLE_NAME));
     // Get modtime on the file.
     long modtime = getModTime(r);
     HRegion.closeHRegion(r);
     Thread.sleep(1001);
-    r = HRegion.openHRegion(basedir, hri, HTableDescriptor.META_TABLEDESC,
-      null, htu.getConfiguration());
+    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
+      null, htu.getConfiguration());
     // Ensure the file is not written for a second time.
     long modtime2 = getModTime(r);
     assertEquals(modtime, modtime2);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
index 6831869..5011f66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -124,8 +124,8 @@ public class TestLogRollingNoCluster {
           edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
           this.wal.append(HRegionInfo.FIRST_META_REGIONINFO,
-            HTableDescriptor.META_TABLEDESC.getTableName(),
-            edit, now, HTableDescriptor.META_TABLEDESC, sequenceId);
+            TableName.META_TABLE_NAME,
+            edit, now, TEST_UTIL.getMetaTableDescriptor(), sequenceId);
         }
         String msg = getName() + " finished";
         if (isException())
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
index 95268e9..a7c7a0f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
@@ -267,7 +267,7 @@ public class TestExportSnapshot {
     Path tableDir = FSUtils.getTableDir(archiveDir, tableWithRefsName);
     HTableDescriptor htd = new HTableDescriptor(tableWithRefsName);
     htd.addFamily(new HColumnDescriptor(TEST_FAMILY));
-    new FSTableDescriptors(fs, rootDir)
+    new FSTableDescriptors(conf, fs, rootDir)
       .createTableDescriptorForTableDirectory(tableDir, htd, false);
 
     Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
index cbe8016..70e95b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java
@@ -70,7 +70,7 @@ public class TestFSTableDescriptors {
     Path testdir = UTIL.getDataTestDir("testCreateAndUpdate");
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testCreate"));
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     assertTrue(fstd.createTableDescriptor(htd));
     assertFalse(fstd.createTableDescriptor(htd));
     FileStatus [] statuses = fs.listStatus(testdir);
@@ -91,7 +91,7 @@ public class TestFSTableDescriptors {
     HTableDescriptor htd = new HTableDescriptor(
         TableName.valueOf("testSequenceidAdvancesOnTableInfo"));
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     Path p0 = fstd.updateTableDescriptor(htd);
     int i0 = FSTableDescriptors.getTableInfoSequenceId(p0);
     Path p1 = fstd.updateTableDescriptor(htd);
@@ -144,7 +144,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     htds.add(htd);
     assertNotNull(htds.remove(htd.getTableName()));
@@ -156,7 +156,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     Path rootdir = UTIL.getDataTestDir(name);
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, rootdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     fstd.createTableDescriptor(htd);
     HTableDescriptor htd2 =
       FSTableDescriptors.getTableDescriptorFromFs(fs, rootdir, htd.getTableName());
@@ -169,7 +169,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any debris laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    FSTableDescriptors htds = new FSTableDescriptors(fs, rootdir) {
+    FSTableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir) {
       @Override
       public HTableDescriptor get(TableName tablename)
           throws TableExistsException, FileNotFoundException, IOException {
@@ -215,7 +215,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     assertNull("There shouldn't be any HTD for this table",
       htds.get(TableName.valueOf("NoSuchTable")));
   }
@@ -226,7 +226,7 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     // Cleanup old tests if any detrius laying around.
     Path rootdir = new Path(UTIL.getDataTestDir(), name);
-    TableDescriptors htds = new FSTableDescriptors(fs, rootdir);
+    TableDescriptors htds = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
     htds.add(htd);
     htds.add(htd);
@@ -264,7 +264,8 @@ public class TestFSTableDescriptors {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     try {
       // .tmp dir is an invalid table name
-      new FSTableDescriptors(fs, FSUtils.getRootDir(UTIL.getConfiguration()))
+      new FSTableDescriptors(UTIL.getConfiguration(), fs,
+          FSUtils.getRootDir(UTIL.getConfiguration()))
         .get(TableName.valueOf(HConstants.HBASE_TEMP_DIRECTORY));
       fail("Shouldn't be able to read a table descriptor for the archive directory.");
     } catch (Exception e) {
@@ -279,7 +280,7 @@ public class TestFSTableDescriptors {
     HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(
       "testCreateTableDescriptorUpdatesIfThereExistsAlready"));
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    FSTableDescriptors fstd = new FSTableDescriptors(fs, testdir);
+    FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, testdir);
     assertTrue(fstd.createTableDescriptor(htd));
     assertFalse(fstd.createTableDescriptor(htd));
     htd.setValue(Bytes.toBytes("mykey"), Bytes.toBytes("myValue"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index b4f4d2d..0c9d463 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -165,8 +165,7 @@ public class TestHBaseFsck {
 
     // Now let's mess it up and change the assignment in hbase:meta to
     // point to a different region server
-    HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName(),
-        executorService);
+    HTable meta = new HTable(conf, TableName.META_TABLE_NAME, executorService);
     Scan scan = new Scan();
     scan.setStartRow(Bytes.toBytes(table+",,"));
     ResultScanner scanner = meta.getScanner(scan);
@@ -1333,8 +1332,7 @@ public class TestHBaseFsck {
         Bytes.toBytes("C"), true, true, false);
 
     // Create a new meta entry to fake it as a split parent.
-    meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName(),
-        executorService);
+    meta = new HTable(conf, TableName.META_TABLE_NAME, executorService);
     HRegionInfo hri = location.getRegionInfo();
 
     HRegionInfo a = new HRegionInfo(tbl.getName(),
@@ -1408,7 +1406,7 @@ public class TestHBaseFsck {
       TEST_UTIL.getHBaseAdmin().flush(table.getName());
       HRegionLocation location = tbl.getRegionLocation("B");
 
-      meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
+      meta = new HTable(conf, TableName.META_TABLE_NAME);
       HRegionInfo hri = location.getRegionInfo();
 
       // do a regular split
@@ -1458,7 +1456,7 @@ public class TestHBaseFsck {
       TEST_UTIL.getHBaseAdmin().flush(table.getName());
       HRegionLocation location = tbl.getRegionLocation("B");
 
-      meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
+      meta = new HTable(conf, TableName.META_TABLE_NAME);
       HRegionInfo hri = location.getRegionInfo();
 
      // do a regular split
@@ -1964,7 +1962,7 @@ public class TestHBaseFsck {
 
     // Mess it up by removing the RegionInfo for one region.
     final List<Delete> deletes = new LinkedList<Delete>();
-    HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
+    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
     MetaScanner.metaScan(conf, new MetaScanner.MetaScannerVisitor() {
 
       @Override
@@ -2205,7 +2203,7 @@ public class TestHBaseFsck {
     LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
     Path rootDir = FSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
-    Path p = new Path(rootDir + "/" + HTableDescriptor.META_TABLEDESC.getNameAsString(),
+    Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
        hri.getEncodedName());
     Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE);
     fs.delete(hriPath, true);
@@ -2215,7 +2213,7 @@ public class TestHBaseFsck {
     LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
     Path rootDir = FSUtils.getRootDir(conf);
     FileSystem fs = rootDir.getFileSystem(conf);
-    Path p = new Path(rootDir + "/" + HTableDescriptor.META_TABLEDESC.getNameAsString(),
+    Path p = new Path(rootDir + "/" + TableName.META_TABLE_NAME.getNameAsString(),
        hri.getEncodedName());
     HBaseFsck.debugLsr(conf, p);
     boolean success = fs.delete(p, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
index c2e7ef1..f2d4e8e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
@@ -95,7 +95,7 @@ public class TestMergeTable {
 
     // Create regions and populate them at same time.  Create the tabledir
     // for them first.
-    new FSTableDescriptors(fs, rootdir).createTableDescriptor(desc);
+    new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc);
     HRegion [] regions = {
       createRegion(desc, null, row_70001, 1, 70000, rootdir),
       createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
@@ -158,7 +158,7 @@ public class TestMergeTable {
       throws IOException {
     HRegion meta =
       HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
-      UTIL.getConfiguration(), HTableDescriptor.META_TABLEDESC);
+      UTIL.getConfiguration(), UTIL.getMetaTableDescriptor());
     for (HRegion r: regions) {
       HRegion.addRegionToMETA(meta, r);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
index 563d51d..beab21f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
@@ -146,7 +146,7 @@ public class TestMergeTool extends HBaseTestCase {
     try {
       // Create meta region
       createMetaRegion();
-      new FSTableDescriptors(this.fs, this.testDir).createTableDescriptor(this.desc);
+      new FSTableDescriptors(conf, this.fs, this.testDir).createTableDescriptor(this.desc);
       /*
       * Create the regions we will merge
       */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
index 4a5f84d..42ea88c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/OfflineMetaRebuildTestCore.java
@@ -276,7 +276,7 @@ public class OfflineMetaRebuildTestCore {
   */
  protected int scanMeta() throws IOException {
    int count = 0;
-    HTable meta = new HTable(conf, HTableDescriptor.META_TABLEDESC.getTableName());
+    HTable meta = new HTable(conf, TableName.META_TABLE_NAME);
    ResultScanner scanner = meta.getScanner(new Scan());
    LOG.info("Table: " + Bytes.toString(meta.getTableName()));
    for (Result res : scanner) {
-- 
1.7.12.4 (Apple Git-37)
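
Usage sketch (illustrative, not part of the patch): the two settings the patch introduces are
read when the hbase:meta descriptor is built by the new
HTableDescriptor.metaTableDescriptor(Configuration) factory. The class name and override
values below are made up for the example; it assumes a classpath with this patch applied.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HTableDescriptor;

    public class MetaDescriptorConfigExample {
      public static void main(String[] args) throws Exception {
        // Loads hbase-default.xml/hbase-site.xml; the explicit overrides below
        // are illustrative values, not recommendations.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt(HConstants.HBASE_META_VERSIONS, 3);            // default: 10
        conf.setInt(HConstants.HBASE_META_BLOCK_SIZE, 16 * 1024);  // default: 8 * 1024

        // Builds the hbase:meta descriptor from configuration instead of the
        // deprecated static META_TABLEDESC constant.
        HTableDescriptor meta = HTableDescriptor.metaTableDescriptor(conf);
        System.out.println("meta max versions = "
            + meta.getFamily(HConstants.CATALOG_FAMILY).getMaxVersions());
      }
    }

On the server side the same values flow through FSTableDescriptors, which now takes a
Configuration and answers get(TableName.META_TABLE_NAME) from this built-in descriptor, so
setting hbase.meta.versions and hbase.meta.blocksize in hbase-site.xml before the master
first bootstraps hbase:meta should take effect cluster-wide.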