diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index fcfce3c..9e9d290 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -984,9 +984,7 @@ public final class HConstants {
 
   /** Directories that are not HBase user table directories */
   public static final List<String> HBASE_NON_USER_TABLE_DIRS =
-    Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll(
-      new String[] { TableName.META_TABLE_NAME.getNameAsString() },
-      HBASE_NON_TABLE_DIRS.toArray())));
+    Collections.unmodifiableList(HBASE_NON_TABLE_DIRS);
 
   /** Health script related settings. */
   public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location";
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 93bde36..6eab89a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -29,14 +29,12 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.ClusterId;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -45,17 +43,19 @@ import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.wal.DefaultWALProvider;
-import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.wal.DefaultWALProvider;
+import org.apache.hadoop.hbase.wal.WALSplitter;
 
 /**
  * This class abstracts a bunch of operations the HMaster needs to interact with
@@ -469,7 +469,7 @@ public class MasterFileSystem {
       // we should get them from registry.
       FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd);
       fsd.createTableDescriptor(
-        new HTableDescriptor(fsd.get(TableName.META_TABLE_NAME)));
+        HTableDescriptor.metaTableDescriptor(c));
       return rd;
     }
 
@@ -509,7 +509,7 @@ public class MasterFileSystem {
       // not make it in first place. Turn off block caching for bootstrap.
       // Enable after.
       HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-      HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+      HTableDescriptor metaDescriptor = HTableDescriptor.metaTableDescriptor(c);
       setInfoFamilyCachingForMeta(metaDescriptor, false);
       HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor);
       setInfoFamilyCachingForMeta(metaDescriptor, true);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 7cd2673..349439b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -91,11 +91,6 @@ public class FSTableDescriptors implements TableDescriptors {
   private final Map<TableName, HTableDescriptor> cache =
     new ConcurrentHashMap<TableName, HTableDescriptor>();
 
-  /**
-   * Table descriptor for hbase:meta catalog table
-   */
-  private final HTableDescriptor metaTableDescriptor;
-
   /**
    * Construct a FSTableDescriptors instance using the hbase root dir of the given
    * conf and the filesystem where that root dir lives.
@@ -121,7 +116,6 @@ public class FSTableDescriptors implements TableDescriptors {
     this.rootdir = rootdir;
     this.fsreadonly = fsreadonly;
     this.usecache = usecache;
-    this.metaTableDescriptor = HTableDescriptor.metaTableDescriptor(conf);
   }
 
   public void setCacheOn() throws IOException {
@@ -149,11 +143,8 @@ public class FSTableDescriptors implements TableDescriptors {
   public HTableDescriptor get(final TableName tablename)
   throws IOException {
     invocations++;
-    if (TableName.META_TABLE_NAME.equals(tablename)) {
-      cachehits++;
-      return metaTableDescriptor;
-    }
-    // hbase:meta is already handled. If some one tries to get the descriptor for
+
+    // If some one tries to get the descriptor for
     // .logs, .oldlogs or .corrupt throw an exception.
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) {
       throw new IOException("No descriptor found for non table = " + tablename);
@@ -197,9 +188,6 @@ public class FSTableDescriptors implements TableDescriptors {
       for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) {
         htds.put(entry.getKey().toString(), entry.getValue());
       }
-      // add hbase:meta to the response
-      htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(),
-        HTableDescriptor.META_TABLEDESC);
     } else {
       LOG.debug("Fetching table descriptors from the filesystem.");
       boolean allvisited = true;
@@ -255,9 +243,6 @@ public class FSTableDescriptors implements TableDescriptors {
     if (fsreadonly) {
       throw new NotImplementedException("Cannot add a table descriptor - in read only mode");
     }
-    if (TableName.META_TABLE_NAME.equals(htd.getTableName())) {
-      throw new NotImplementedException();
-    }
     if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getTableName().getNameAsString())) {
       throw new NotImplementedException(
         "Cannot add a table descriptor for a reserved subdirectory name: " + htd.getNameAsString());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 601aff5..16ff2ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1240,7 +1240,7 @@ public class HBaseFsck extends Configured implements Closeable {
     Path rootdir = FSUtils.getRootDir(getConf());
     Configuration c = getConf();
     HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
-    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
+    HTableDescriptor metaDescriptor = HTableDescriptor.metaTableDescriptor(c);
     MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false);
     HRegion meta = HRegion.createHRegion(metaHRI, rootdir, c, metaDescriptor);
     MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, true);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
index 5b58bbe..018523e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java
@@ -639,9 +639,8 @@ public abstract class HBaseTestCase extends TestCase {
    * @throws IOException
    */
   protected void createMetaRegion() throws IOException {
-    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf);
     meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, testDir, conf,
-      fsTableDescriptors.get(TableName.META_TABLE_NAME));
+      HTableDescriptor.metaTableDescriptor(conf));
   }
 
   protected void closeRootAndMeta() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index e58f6a2..b529278 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -385,7 +385,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    */
   public HTableDescriptor getMetaTableDescriptor() {
     try {
-      return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);
+      return HTableDescriptor.metaTableDescriptor(conf);
     } catch (IOException e) {
       throw new RuntimeException("Unable to create META table descriptor", e);
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
index e29bef8..67520bd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java
@@ -18,31 +18,30 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.junit.Assert.assertArrayEquals;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.IOException;
 
+import com.google.protobuf.ByteString;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import com.google.protobuf.ByteString;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 @Category(SmallTests.class)
 public class TestHRegionInfo {
@@ -59,15 +58,15 @@ public class TestHRegionInfo {
     HBaseTestingUtility htu = new HBaseTestingUtility();
     HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
     Path basedir = htu.getDataTestDir();
-    FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
     // Create a region.  That'll write the .regioninfo file.
-    HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(),
-      fsTableDescriptors.get(TableName.META_TABLE_NAME));
+    HTableDescriptor metaHtd =
+      HTableDescriptor.metaTableDescriptor(htu.getConfiguration());
+    HRegion r = HRegion.createHRegion(hri, basedir, htu.getConfiguration(), metaHtd);
     // Get modtime on the file.
     long modtime = getModTime(r);
     HRegion.closeHRegion(r);
     Thread.sleep(1001);
-    r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME),
+    r = HRegion.openHRegion(basedir, hri, metaHtd,
       null, htu.getConfiguration());
     // Ensure the file is not written for a second time.
     long modtime2 = getModTime(r);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
index 8727e23..503417e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRollingNoCluster.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
-import static org.junit.Assert.assertFalse;
-
 import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
 
@@ -32,8 +30,8 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -43,6 +41,8 @@ import org.apache.hadoop.hbase.wal.WALKey;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import static org.junit.Assert.assertFalse;
+
 /**
  * Test many concurrent appenders to an {@link #WAL} while rolling the log.
  */
@@ -135,8 +135,8 @@ public class TestLogRollingNoCluster {
         byte[] bytes = Bytes.toBytes(i);
         edit.add(new KeyValue(bytes, bytes, bytes, now, EMPTY_1K_ARRAY));
         final HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO;
-        final FSTableDescriptors fts = new FSTableDescriptors(TEST_UTIL.getConfiguration());
-        final HTableDescriptor htd = fts.get(TableName.META_TABLE_NAME);
+        final HTableDescriptor htd =
+          HTableDescriptor.metaTableDescriptor(TEST_UTIL.getConfiguration());
        final long txid = wal.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(),
          TableName.META_TABLE_NAME, now), edit, sequenceId, true, null);
        wal.sync(txid);
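
Reviewer note, not part of the patch: every call site that previously routed through FSTableDescriptors to obtain the hbase:meta descriptor now builds it directly from the Configuration. A minimal sketch of the resulting call pattern follows; the demo class name is invented for illustration, while the HTableDescriptor/HBaseConfiguration calls are the real APIs the patch itself uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HTableDescriptor;

    // MetaDescriptorDemo is a hypothetical class used only to show the pattern.
    public class MetaDescriptorDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();

        // Old pattern (removed by this patch): go through FSTableDescriptors,
        // which special-cased hbase:meta with a descriptor cached at construction:
        //   new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME);

        // New pattern: build the hbase:meta descriptor straight from the
        // Configuration; no filesystem access or descriptor cache is involved.
        HTableDescriptor metaHtd = HTableDescriptor.metaTableDescriptor(conf);
        System.out.println(metaHtd.getTableName() + " has "
            + metaHtd.getColumnFamilies().length + " column families");
      }
    }

This matters most for the bootstrap paths (MasterFileSystem, HBaseFsck), which need the meta descriptor before any descriptor file exists on the filesystem, so a static factory on HTableDescriptor is a better fit there than a read through FSTableDescriptors.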