diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index a0ab484..058c7ae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -153,6 +153,13 @@ public class HTableDescriptor implements Comparable { new Bytes(Bytes.toBytes(IS_META)); /** + * @see #getMetaVersion() + */ + public static final String META_VERSION = "META_VERSION"; + public static final Bytes META_VERSION_KEY = + new Bytes(Bytes.toBytes(META_VERSION)); + + /** * INTERNAL Used by HBase Shell interface to access this metadata * attribute which denotes if the deferred log flush option is enabled. * @deprecated Use {@link #DURABILITY} instead. @@ -364,6 +371,22 @@ public class HTableDescriptor implements Comparable { private void setMetaFlags(final TableName name) { setMetaRegion(isRootRegion() || name.equals(TableName.META_TABLE_NAME)); + byte[] version = getValue(META_VERSION_KEY); + if (version == null) { + setMetaVersion(HConstants.NO_VERSION_OF_META_VERSION); + } + } + + public void setMetaVersion(int metaVersion) { + setValue(META_VERSION_KEY, Integer.toString(metaVersion)); + } + + public int getMetaVersion() { + byte[] value = getValue(META_VERSION_KEY); + if (value == null) { + return HConstants.NO_VERSION_OF_META_VERSION; + } + return Integer.parseInt(Bytes.toString(value)); } /** @@ -1350,49 +1373,6 @@ public class HTableDescriptor implements Comparable { new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString())))); } - /** Table descriptor for hbase:meta catalog table - * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or - * Admin#getTableDescriptor(TableName.META_TABLE) instead. - */ - @Deprecated - public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( - TableName.META_TABLE_NAME, - new HColumnDescriptor[] { - new HColumnDescriptor(HConstants.CATALOG_FAMILY) - // Ten is arbitrary number. Keep versions to help debugging. - .setMaxVersions(10) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. - .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true), - new HColumnDescriptor(HConstants.TABLE_FAMILY) - // Ten is arbitrary number. Keep versions to help debugging. - .setMaxVersions(10) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. - .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache).
- .setCacheDataInL1(true) - }); - - static { - try { - META_TABLEDESC.addCoprocessor( - "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint", - null, Coprocessor.PRIORITY_SYSTEM, null); - } catch (IOException ex) { - //LOG.warn("exception in loading coprocessor for the hbase:meta table"); - throw new RuntimeException(ex); - } - } - public final static String NAMESPACE_FAMILY_INFO = "info"; public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO); public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d"); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 4df58a2..b260298 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -85,6 +85,8 @@ public class MetaTableAccessor { * same table range (table, startKey, endKey). For every range, there will be at least one * HRI defined which is called default replica. * + * The meta table descriptor carries the version tag META_VERSION; see HTableDescriptor#getMetaVersion(). + * * Meta layout (as of 0.98 + HBASE-10070) is like: * * For each table there is single row in column family 'table' formatted: diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index c77e2ae..a2576a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -427,9 +427,6 @@ public class HTable implements HTableInterface { public HTableDescriptor getTableDescriptor() throws IOException { // TODO: This is the same as HBaseAdmin.getTableDescriptor(). Only keep one. if (tableName == null) return null; - if (tableName.equals(TableName.META_TABLE_NAME)) { - return HTableDescriptor.META_TABLEDESC; - } HTableDescriptor htd = executeMasterCallable( new MasterCallable(getConnection()) { @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 19e251a..4e0dc13 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -480,8 +480,14 @@ public final class HConstants { * - version 1 for 0.96+ keeps HRegionInfo data structures, but changes the * byte[] serialization from Writables to Protobuf. * See HRegionInfo.VERSION + * - version 2 keeps table state in a separate column family */ - public static final short META_VERSION = 1; + public static final int META_VERSION = 2; + + /** + * Denotes the version of a meta whose descriptor carries no version tag + */ + public static final int NO_VERSION_OF_META_VERSION = 1; // Other constants @@ -1006,9 +1012,7 @@ public final class HConstants { /** Directories that are not HBase user table directories */ public static final List HBASE_NON_USER_TABLE_DIRS = - Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll( - new String[] { TableName.META_TABLE_NAME.getNameAsString() }, - HBASE_NON_TABLE_DIRS.toArray()))); + Collections.unmodifiableList(HBASE_NON_TABLE_DIRS); /** Health script related settings.
*/ public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java index 642d108..62ef74f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/TableDescriptor.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.regionserver.BloomType; +import org.apache.hadoop.hbase.util.Bytes; /** * Class represents table state on HDFS. @@ -156,6 +157,7 @@ public class TableDescriptor { .setCacheDataInL1(true) }) { }; + metaDescriptor.setMetaVersion(HConstants.META_VERSION); metaDescriptor.addCoprocessor( "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint", null, Coprocessor.PRIORITY_SYSTEM, null); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 78e4c11..04cfe22 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -18,18 +18,8 @@ */ package org.apache.hadoop.hbase.master; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -46,19 +36,29 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.wal.DefaultWALProvider; -import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.MetaMigration; +import org.apache.hadoop.hbase.wal.DefaultWALProvider; +import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.ipc.RemoteException; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + /** * This class abstracts a bunch of operations the HMaster needs to interact with * the underlying file system, including splitting log files, checking 
file @@ -465,14 +465,10 @@ public class MasterFileSystem { .migrateFSTableDescriptorsIfNecessary(fs, rd); } - // Create tableinfo-s for hbase:meta if not already there. - // assume, created table descriptor is for enabling table - // meta table is a system table, so descriptors are predefined, - // we should get them from registry. + // The meta table descriptor is stored in HDFS. + // Create it, or migrate an existing one, as needed. FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd); - fsd.createTableDescriptor( - new TableDescriptor(fsd.get(TableName.META_TABLE_NAME))); - + MetaMigration.migrateMeta(fsd); return rd; } @@ -511,7 +507,7 @@ public class MasterFileSystem { // not make it in first place. Turn off block caching for bootstrap. // Enable after. HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO); - HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME); + HTableDescriptor metaDescriptor = TableDescriptor.metaTableDescriptor(c); setInfoFamilyCachingForMeta(metaDescriptor, false); HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null); setInfoFamilyCachingForMeta(metaDescriptor, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index 4316a85..a0aefd9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1405,6 +1405,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // If there is no action in progress, we can submit a specific handler. // Need to pass the expected version in the constructor. if (region.isMetaRegion()) { + // check that the on-disk meta descriptor version matches what this code expects + if (htd.getMetaVersion() != HConstants.META_VERSION) { + throw new HBaseIOException("Mismatched meta version:" + + " table descriptor has " + htd.getMetaVersion() + + " but " + HConstants.META_VERSION + " was expected"); + } regionServer.service.submit(new OpenMetaHandler( regionServer, regionServer, region, htd)); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index cce37d7..c81c672 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @InterfaceAudience.Private public class FSTableDescriptors implements TableDescriptors { private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class); + private final Configuration conf; private final FileSystem fs; private final Path rootdir; private final boolean fsreadonly; @@ -92,11 +93,6 @@ public class FSTableDescriptors implements TableDescriptors { new ConcurrentHashMap(); /** - * Table descriptor for hbase:meta catalog table - */ - private final HTableDescriptor metaTableDescritor; - - /** * Construct a FSTableDescriptors instance using the hbase root dir of the given * conf and the filesystem where that root dir lives. * This instance can do write operations (is not read only).
@@ -117,12 +113,11 @@ public class FSTableDescriptors { public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException { super(); + this.conf = conf; this.fs = fs; this.rootdir = rootdir; this.fsreadonly = fsreadonly; this.usecache = usecache; - - this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf); } public void setCacheOn() throws IOException { @@ -151,11 +146,8 @@ public TableDescriptor getDescriptor(final TableName tablename) throws IOException { invocations++; - if (TableName.META_TABLE_NAME.equals(tablename)) { - cachehits++; - return new TableDescriptor(metaTableDescritor); - } - // hbase:meta is already handled. If some one tries to get the descriptor for + + // If someone tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) { throw new IOException("No descriptor found for non table = " + tablename); @@ -187,6 +179,10 @@ return tdmt; } + public Configuration getConfiguration() { + return conf; + } + /** * Get the current table descriptor for the given table, or null if none exists. * * @param tableName @@ -194,11 +190,8 @@ * to see if a newer file has been created since the cached one was read. */ @Override + @Nullable public HTableDescriptor get(TableName tableName) throws IOException { - if (TableName.META_TABLE_NAME.equals(tableName)) { - cachehits++; - return metaTableDescritor; - } TableDescriptor descriptor = getDescriptor(tableName); return descriptor == null ?
null : descriptor.getHTableDescriptor(); } @@ -215,9 +208,6 @@ for (Map.Entry entry: this.cache.entrySet()) { tds.put(entry.getKey().toString(), entry.getValue()); } - // add hbase:meta to the response - tds.put(this.metaTableDescritor.getNameAsString(), - new TableDescriptor(metaTableDescritor)); } else { LOG.debug("Fetching table descriptors from the filesystem."); boolean allvisited = true; @@ -289,9 +279,6 @@ throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); } TableName tableName = htd.getHTableDescriptor().getTableName(); - if (TableName.META_TABLE_NAME.equals(tableName)) { - throw new NotImplementedException(); - } if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) { throw new NotImplementedException( "Cannot add a table descriptor for a reserved subdirectory name: " @@ -310,9 +297,6 @@ throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); } TableName tableName = htd.getTableName(); - if (TableName.META_TABLE_NAME.equals(tableName)) { - throw new NotImplementedException(); - } if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) { throw new NotImplementedException( "Cannot add a table descriptor for a reserved subdirectory name: " @@ -655,6 +639,10 @@ private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId) throws IOException { FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER); + if (status == null) { + LOG.debug("No table descriptor files to delete in " + dir); + return; + } for (FileStatus file : status) { Path path = file.getPath(); int sequenceId = getTableInfoSequenceId(path); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 1fb64a2..85152a0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -1293,7 +1293,7 @@ public class HBaseFsck extends Configured implements Closeable { Path rootdir = FSUtils.getRootDir(getConf()); Configuration c = getConf(); HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO); - HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME); + HTableDescriptor metaDescriptor = TableDescriptor.metaTableDescriptor(c); MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false); // The WAL subsystem will use the default rootDir rather than the passed in rootDir // unless I pass along via the conf.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java index 6002f29..fd152d0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.io.WritableComparator; import org.apache.hadoop.util.GenericOptionsParser; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaMigration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaMigration.java new file mode 100644 index 0000000..e55103a --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaMigration.java @@ -0,0 +1,58 @@ +package org.apache.hadoop.hbase.util; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableName; + +import java.io.IOException; + +/** + * Class supports various meta migration procedures. + */ +public class MetaMigration { + + private static final Log LOG = LogFactory.getLog(MetaMigration.class); + + public static void migrateMeta(FSTableDescriptors fsd) throws IOException { + Configuration conf = fsd.getConfiguration(); + HTableDescriptor compiledMetaDescriptor = TableDescriptor.metaTableDescriptor(conf); + HTableDescriptor current = fsd.get(TableName.META_TABLE_NAME); + if (current == null) { + LOG.info("No meta descriptor found, creating a new one"); + fsd.add(compiledMetaDescriptor); + } else if (current.getMetaVersion() < HConstants.META_VERSION) { + LOG.info("Meta descriptor is outdated, running migration " + + current.getMetaVersion() + " -> " + HConstants.META_VERSION); + MetaMigration.doMigration(fsd, current, compiledMetaDescriptor); + } else if (current.getMetaVersion() > HConstants.META_VERSION) { + throw new IllegalStateException( + "Meta is more recent than this code supports, downgrade is not possible, rebuild it with hbck if needed"); + } + } + + static void doMigration(FSTableDescriptors fsd, HTableDescriptor current, + HTableDescriptor newDescriptor) throws IOException { + switch (current.getMetaVersion()) { + case HConstants.NO_VERSION_OF_META_VERSION: + // currently we only migrate the descriptor, but a full meta migration, + // including rewriting internal data structures, could hook in here + fsd.add(newDescriptor); + break; + case 2: + // only the new 'table' family was added, so preserve existing descriptor values and add that family + HTableDescriptor copy = new HTableDescriptor(current); + copy.addFamily(newDescriptor.getFamily(HConstants.TABLE_FAMILY)); + copy.setMetaVersion(HConstants.META_VERSION); + fsd.add(copy); + break; + default: + throw new IllegalArgumentException( + "Should never happen; if it does, migration code is missing for version " + + current.getMetaVersion()); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index 8ea6178..38d904a 100644 ---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -640,9 +640,8 @@ public abstract class HBaseTestCase extends TestCase { * @throws IOException */ protected void createMetaRegion() throws IOException { - FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf); meta = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, testDir, - conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); + conf, TableDescriptor.metaTableDescriptor(conf)); } protected void closeRootAndMeta() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 0445cb0..496c8ed 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -399,7 +399,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public HTableDescriptor getMetaTableDescriptor() { try { - return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME); + return TableDescriptor.metaTableDescriptor(conf); } catch (IOException e) { throw new RuntimeException("Unable to create META table descriptor", e); } @@ -1166,6 +1166,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { FSUtils.setRootDir(this.conf, hbaseRootdir); fs.mkdirs(hbaseRootdir); FSUtils.setVersion(fs, hbaseRootdir); + new FSTableDescriptors(conf).createTableDescriptor(TableDescriptor.metaTableDescriptor(conf)); return hbaseRootdir; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java index 0534643..56ac16d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import java.util.HashSet; @@ -30,8 +31,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogTask; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.FSUtils; @@ -116,4 +120,24 @@ public class TestMasterFileSystem { zkw.close(); } + @Test + public void testMetaPreserved() throws Exception { + HMaster master = UTIL.getMiniHBaseCluster().getMaster(); + + TableDescriptors tableDescriptors = master.getTableDescriptors(); + HTableDescriptor mtd = tableDescriptors.get(TableName.META_TABLE_NAME); + // force the parameters to differ from what is expected + mtd.setCompactionEnabled(false); + mtd.setRegionReplication(55); + tableDescriptors.add(mtd); + + UTIL.getHBaseCluster().stopMaster(master.getServerName()); + UTIL.getHBaseCluster().waitForMasterToStop(master.getServerName(),
10000); + HMaster master2 = UTIL.getHBaseCluster().startMaster().getMaster(); + UTIL.getHBaseCluster().waitForActiveAndReadyMaster(10000); + HTableDescriptor mtd2 = master2.getTableDescriptors().get(TableName.META_TABLE_NAME); + assertNotEquals(mtd, mtd2); + assertNotEquals(55, mtd2.getRegionReplication()); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 6c8c77a..a5ea1e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -34,16 +34,17 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.regionserver.InternalScanner.NextState; -import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -73,10 +74,11 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { FileSystem filesystem = FileSystem.get(conf); Path rootdir = testDir; // Up flush size else we bind up when we use default catalog flush of 16k. - fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024); + HTableDescriptor metaHtd = TableDescriptor.metaTableDescriptor(conf); + metaHtd.setMemStoreFlushSize(64 * 1024 * 1024); HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, - rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); + rootdir, this.conf, metaHtd); try { // Write rows for three tables 'A', 'B', and 'C'. 
for (char c = 'A'; c < 'D'; c++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 5fde726..df3ea8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -18,35 +18,34 @@ */ package org.apache.hadoop.hbase.regionserver; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.IOException; +import com.google.protobuf.ByteString; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.MD5Hash; import org.junit.Assert; import org.junit.Test; import org.junit.experimental.categories.Category; -import com.google.protobuf.ByteString; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; @Category({RegionServerTests.class, SmallTests.class}) public class TestHRegionInfo { @@ -64,14 +63,14 @@ public class TestHRegionInfo { HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO; Path basedir = htu.getDataTestDir(); // Create a region. That'll write the .regioninfo file. - FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); + HTableDescriptor metaHtd = TableDescriptor.metaTableDescriptor(htu.getConfiguration()); HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + metaHtd); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtility.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), + r = HRegion.openHRegion(basedir, hri, metaHtd, null, htu.getConfiguration()); // Ensure the file is not written for a second time. 
long modtime2 = getModTime(r); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index c09982e..4e5efe0 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -301,7 +301,7 @@ public class TestFSTableDescriptors { htds.createTableDescriptor(htd); } // add hbase:meta - HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName()); + HTableDescriptor htd = TableDescriptor.metaTableDescriptor(UTIL.getConfiguration()); htds.createTableDescriptor(htd); assertEquals("getAll() didn't return all TableDescriptors, expected: " + @@ -336,11 +336,11 @@ public class TestFSTableDescriptors { assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); // add a new entry for hbase:meta - HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName()); + HTableDescriptor htd = TableDescriptor.metaTableDescriptor(UTIL.getConfiguration()); nonchtds.createTableDescriptor(htd); - // hbase:meta will only increase the cachehit by 1 - assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); + // hbase:meta should appear + assertEquals(chtds.getAll().size() + 1, nonchtds.getAll().size()); for (Map.Entry entry: nonchtds.getAll().entrySet()) { String t = (String) entry.getKey(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMetaMigration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMetaMigration.java new file mode 100644 index 0000000..3fc7084 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMetaMigration.java @@ -0,0 +1,80 @@ +package org.apache.hadoop.hbase.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; + +@Category({MiscTests.class, SmallTests.class}) +public class TestMetaMigration { + + private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private Configuration conf; + + @Before + public void init() throws IOException { + conf = TEST_UTIL.getConfiguration(); + TEST_UTIL.createRootDir(true); + FSTableDescriptors fsd = new FSTableDescriptors(conf); + fsd.deleteTableDescriptorIfExists(TableName.META_TABLE_NAME); + // initialize descriptor + fsd.createTableDescriptor(TableDescriptor.metaTableDescriptor(conf)); + } + + @Test + public void testMigrationCreatesDescriptor() throws IOException { + FSTableDescriptors fsd = new FSTableDescriptors(TEST_UTIL.getConfiguration()); + fsd.deleteTableDescriptorIfExists(TableName.META_TABLE_NAME); + MetaMigration.migrateMeta(fsd); + HTableDescriptor created = fsd.get(TableName.META_TABLE_NAME); + assertNotNull(created); + assertEquals(HConstants.META_VERSION, created.getMetaVersion()); + }
+ + @Test + public void testMigrationNoVersionToRecent() throws IOException { + FSTableDescriptors fsd = new FSTableDescriptors(TEST_UTIL.getConfiguration()); + + HTableDescriptor cleanHtd = TableDescriptor.metaTableDescriptor(fsd.getConfiguration()); + // make it look like an old, pre-versioned descriptor + HTableDescriptor changedHtd = new HTableDescriptor(cleanHtd); + changedHtd.remove(HTableDescriptor.META_VERSION_KEY); + changedHtd.removeFamily(HConstants.TABLE_FAMILY); + fsd.add(changedHtd); + MetaMigration.migrateMeta(fsd); + HTableDescriptor noVersionHtdMigrated = fsd.get(TableName.META_TABLE_NAME); + assertNotNull(noVersionHtdMigrated); + assertEquals(HConstants.META_VERSION, noVersionHtdMigrated.getMetaVersion()); + assertEquals(cleanHtd.getFamily(HConstants.TABLE_FAMILY), + noVersionHtdMigrated.getFamily(HConstants.TABLE_FAMILY)); + } + + @Test + public void testDowngradeCheck() throws IOException { + FSTableDescriptors fsd = new FSTableDescriptors(TEST_UTIL.getConfiguration()); + HTableDescriptor newHtd = TableDescriptor.metaTableDescriptor(fsd.getConfiguration()); + newHtd.setMetaVersion(HConstants.META_VERSION + 1); + fsd.add(newHtd); + try { + MetaMigration.migrateMeta(fsd); + fail("Exception should be thrown"); + } catch (IllegalStateException iss) { + assertTrue(iss.getMessage().contains("downgrade")); + } + } + +} \ No newline at end of file