diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 24af4ab..aa67314 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilySchema; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema; -import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; @@ -153,6 +152,13 @@ public class HTableDescriptor implements Comparable { new Bytes(Bytes.toBytes(IS_META)); /** + * @see #getMetaVersion() + */ + public static final String META_VERSION = "META_VERSION"; + public static final Bytes META_VERSION_KEY = + new Bytes(Bytes.toBytes(META_VERSION)); + + /** * INTERNAL Used by HBase Shell interface to access this metadata * attribute which denotes if the deferred log flush option is enabled. * @deprecated Use {@link #DURABILITY} instead. @@ -362,11 +368,46 @@ public class HTableDescriptor implements Comparable { * IS_ROOT_KEY is set if its a -ROOT- table * IS_META_KEY is set either if its a -ROOT- or a hbase:meta table * Called by constructors. 
+ * For meta regions, the current meta version will be added or removed * @param name */ private void setMetaFlags(final TableName name) { setMetaRegion(isRootRegion() || - name.equals(TableName.META_TABLE_NAME)); + name.equals(TableName.META_TABLE_NAME)); + if (isMetaRegion()) { + byte[] version = getValue(META_VERSION_KEY); + if (version == null) { + setMetaVersion(HConstants.META_VERSION); + } + } else { + remove(META_VERSION_KEY); + } + } + + /** + * Set hbase:meta version + * @param metaVersion version to set + * @return this + * @throws java.lang.IllegalArgumentException if the version is set on a non-meta table + */ + public HTableDescriptor setMetaVersion(int metaVersion) { + if (!isMetaTable()) { + throw new IllegalArgumentException( + "Can't set meta version for non hbase:meta table descriptor"); + } + setValue(META_VERSION_KEY, Integer.toString(metaVersion)); + return this; + } + + /** + * Version of hbase:meta used + */ + public int getMetaVersion() { + byte[] value = getValue(META_VERSION_KEY); + if (value == null) { + return HConstants.NO_VERSION_OF_META_VERSION; + } + return Integer.parseInt(Bytes.toString(value)); } /** @@ -1353,49 +1394,6 @@ public class HTableDescriptor implements Comparable { new Path(name.getNamespaceAsString(), new Path(name.getQualifierAsString())))); } - /** Table descriptor for hbase:meta catalog table - * Deprecated, use TableDescriptors#get(TableName.META_TABLE) or - * Admin#getTableDescriptor(TableName.META_TABLE) instead. - */ - @Deprecated - public static final HTableDescriptor META_TABLEDESC = new HTableDescriptor( - TableName.META_TABLE_NAME, - new HColumnDescriptor[] { - new HColumnDescriptor(HConstants.CATALOG_FAMILY) - // Ten is arbitrary number. Keep versions to help debugging. - .setMaxVersions(10) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. 
- .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true), - new HColumnDescriptor(HConstants.TABLE_FAMILY) - // Ten is arbitrary number. Keep versions to help debugging. - .setMaxVersions(10) - .setInMemory(true) - .setBlocksize(8 * 1024) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - // Disable blooms for meta. Needs work. Seems to mess w/ getClosestOrBefore. - .setBloomFilterType(BloomType.NONE) - // Enable cache of data blocks in L1 if more than one caching tier deployed: - // e.g. if using CombinedBlockCache (BucketCache). - .setCacheDataInL1(true) - }); - - static { - try { - META_TABLEDESC.addCoprocessor( - "org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint", - null, Coprocessor.PRIORITY_SYSTEM, null); - } catch (IOException ex) { - //LOG.warn("exception in loading coprocessor for the hbase:meta table"); - throw new RuntimeException(ex); - } - } - public final static String NAMESPACE_FAMILY_INFO = "info"; public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO); public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d"); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index ea29e4f..be8f72f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -83,6 +83,8 @@ public class MetaTableAccessor { * same table range (table, startKey, endKey). For every range, there will be at least one * HRI defined which is called default replica. * + * Meta Descriptor contains version tag META_VERSION. 
+ * * Meta layout (as of 0.98 + HBASE-10070) is like: * * For each table there is single row in column family 'table' formatted: diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 6cdfc8a..2cd7f43 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -398,9 +398,6 @@ public class HTable implements HTableInterface { public HTableDescriptor getTableDescriptor() throws IOException { // TODO: This is the same as HBaseAdmin.getTableDescriptor(). Only keep one. if (tableName == null) return null; - if (tableName.equals(TableName.META_TABLE_NAME)) { - return HTableDescriptor.META_TABLEDESC; - } HTableDescriptor htd = executeMasterCallable( new MasterCallable(getConnection()) { @Override diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 0b755d7..970ab77 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -26,7 +26,6 @@ import java.util.List; import java.util.UUID; import java.util.regex.Pattern; -import org.apache.commons.lang.ArrayUtils; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.Bytes; @@ -480,8 +479,14 @@ public final class HConstants { * - version 1 for 0.96+ keeps HRegionInfo data structures, but changes the * byte[] serialization from Writables to Protobuf. 
* See HRegionInfo.VERSION + * - version 2 keeps state of tables in separate family */ - public static final short META_VERSION = 1; + public static final int META_VERSION = 2; + + /** + * Denotes version of meta which has no version tag in descriptor + */ + public static final int NO_VERSION_OF_META_VERSION = 1; // Other constants @@ -1023,9 +1028,7 @@ public final class HConstants { /** Directories that are not HBase user table directories */ public static final List HBASE_NON_USER_TABLE_DIRS = - Collections.unmodifiableList(Arrays.asList((String[])ArrayUtils.addAll( - new String[] { TableName.META_TABLE_NAME.getNameAsString() }, - HBASE_NON_TABLE_DIRS.toArray()))); + Collections.unmodifiableList(HBASE_NON_TABLE_DIRS); /** Health script related settings. */ public static final String HEALTH_SCRIPT_LOC = "hbase.node.health.script.location"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index de28cdc..6657222 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -46,11 +46,11 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.backup.HFileArchiver; -import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.util.MetaMigration; import org.apache.hadoop.hbase.wal.DefaultWALProvider; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.util.Bytes; @@ -465,14 +465,10 @@ public class MasterFileSystem { 
.migrateFSTableDescriptorsIfNecessary(fs, rd); } - // Create tableinfo-s for hbase:meta if not already there. - // assume, created table descriptor is for enabling table - // meta table is a system table, so descriptors are predefined, - // we should get them from registry. + // Meta table descriptor stored in the hdfs. + // try to create it. FSTableDescriptors fsd = new FSTableDescriptors(c, fs, rd); - fsd.createTableDescriptor( - new TableDescriptor(fsd.get(TableName.META_TABLE_NAME))); - + MetaMigration.migrateMeta(fsd); return rd; } @@ -511,7 +507,7 @@ public class MasterFileSystem { // not make it in first place. Turn off block caching for bootstrap. // Enable after. HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO); - HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME); + HTableDescriptor metaDescriptor = TableDescriptor.metaTableDescriptor(c); setInfoFamilyCachingForMeta(metaDescriptor, false); HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null); setInfoFamilyCachingForMeta(metaDescriptor, true); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java index d09bd8d..a696f5a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java @@ -1464,6 +1464,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler, // If there is no action in progress, we can submit a specific handler. // Need to pass the expected version in the constructor. 
if (region.isMetaRegion()) { + // check that our meta is compatible with what came from descriptor + if (htd.getMetaVersion() != HConstants.META_VERSION) { + throw new HBaseIOException("Mismatched meta version: " + + "table descriptor has " + htd.getMetaVersion() + + " but " + HConstants.META_VERSION + " was expected"); + } regionServer.service.submit(new OpenMetaHandler( regionServer, regionServer, region, htd)); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index cce37d7..a686a29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @InterfaceAudience.Private public class FSTableDescriptors implements TableDescriptors { private static final Log LOG = LogFactory.getLog(FSTableDescriptors.class); + private final Configuration conf; private final FileSystem fs; private final Path rootdir; private final boolean fsreadonly; @@ -92,11 +93,6 @@ public class FSTableDescriptors implements TableDescriptors { new ConcurrentHashMap(); /** - * Table descriptor for hbase:meta catalog table - */ - private final HTableDescriptor metaTableDescritor; - - /** * Construct a FSTableDescriptors instance using the hbase root dir of the given * conf and the filesystem where that root dir lives. * This instance can do write operations (is not read only). 
@@ -117,12 +113,11 @@ public class FSTableDescriptors implements TableDescriptors { public FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir, final boolean fsreadonly, final boolean usecache) throws IOException { super(); + this.conf = conf; this.fs = fs; this.rootdir = rootdir; this.fsreadonly = fsreadonly; this.usecache = usecache; - - this.metaTableDescritor = TableDescriptor.metaTableDescriptor(conf); } public void setCacheOn() throws IOException { @@ -151,11 +146,8 @@ public class FSTableDescriptors implements TableDescriptors { public TableDescriptor getDescriptor(final TableName tablename) throws IOException { invocations++; - if (TableName.META_TABLE_NAME.equals(tablename)) { - cachehits++; - return new TableDescriptor(metaTableDescritor); - } - // hbase:meta is already handled. If some one tries to get the descriptor for + + // If some one tries to get the descriptor for // .logs, .oldlogs or .corrupt throw an exception. if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tablename.getNameAsString())) { throw new IOException("No descriptor found for non table = " + tablename); @@ -187,6 +179,10 @@ public class FSTableDescriptors implements TableDescriptors { return tdmt; } + public Configuration getConfiguration() { + return conf; + } + /** * Get the current table descriptor for the given table, or null if none exists. * @@ -194,11 +190,8 @@ public class FSTableDescriptors implements TableDescriptors { * to see if a newer file has been created since the cached one was read. */ @Override + @Nullable public HTableDescriptor get(TableName tableName) throws IOException { - if (TableName.META_TABLE_NAME.equals(tableName)) { - cachehits++; - return metaTableDescritor; - } TableDescriptor descriptor = getDescriptor(tableName); return descriptor == null ? 
null : descriptor.getHTableDescriptor(); } @@ -215,9 +208,6 @@ public class FSTableDescriptors implements TableDescriptors { for (Map.Entry entry: this.cache.entrySet()) { tds.put(entry.getKey().toString(), entry.getValue()); } - // add hbase:meta to the response - tds.put(this.metaTableDescritor.getNameAsString(), - new TableDescriptor(metaTableDescritor)); } else { LOG.debug("Fetching table descriptors from the filesystem."); boolean allvisited = true; @@ -289,9 +279,6 @@ public class FSTableDescriptors implements TableDescriptors { throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); } TableName tableName = htd.getHTableDescriptor().getTableName(); - if (TableName.META_TABLE_NAME.equals(tableName)) { - throw new NotImplementedException(); - } if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) { throw new NotImplementedException( "Cannot add a table descriptor for a reserved subdirectory name: " @@ -310,9 +297,6 @@ public class FSTableDescriptors implements TableDescriptors { throw new NotImplementedException("Cannot add a table descriptor - in read only mode"); } TableName tableName = htd.getTableName(); - if (TableName.META_TABLE_NAME.equals(tableName)) { - throw new NotImplementedException(); - } if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName.getNameAsString())) { throw new NotImplementedException( "Cannot add a table descriptor for a reserved subdirectory name: " @@ -655,6 +639,10 @@ public class FSTableDescriptors implements TableDescriptors { private static void deleteTableDescriptorFiles(FileSystem fs, Path dir, int maxSequenceId) throws IOException { FileStatus [] status = FSUtils.listStatus(fs, dir, TABLEINFO_PATHFILTER); + if (status == null) { + LOG.debug("No table descriptor files found to delete in " + dir); + return; + } for (FileStatus file : status) { Path path = file.getPath(); int sequenceId = getTableInfoSequenceId(path); diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 67e3411..d921f8b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -1303,7 +1303,7 @@ public class HBaseFsck extends Configured implements Closeable { Path rootdir = FSUtils.getRootDir(getConf()); Configuration c = getConf(); HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO); - HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME); + HTableDescriptor metaDescriptor = TableDescriptor.metaTableDescriptor(c); MasterFileSystem.setInfoFamilyCachingForMeta(metaDescriptor, false); // The WAL subsystem will use the default rootDir rather than the passed in rootDir // unless I pass along via the conf. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaMigration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaMigration.java new file mode 100644 index 0000000..f4c097d --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/MetaMigration.java @@ -0,0 +1,76 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableName; + +import java.io.IOException; + +/** + * Class supports various meta migration procedures. + */ +public class MetaMigration { + + private static final Log LOG = LogFactory.getLog(MetaMigration.class); + + public static void migrateMeta(FSTableDescriptors fsd) throws IOException { + Configuration conf = fsd.getConfiguration(); + HTableDescriptor compiledMetaDescriptor = TableDescriptor.metaTableDescriptor(conf); + HTableDescriptor current = fsd.get(TableName.META_TABLE_NAME); + if (current == null) { + LOG.info("No Meta found, creating new one"); + fsd.add(compiledMetaDescriptor); + } else if (current.getMetaVersion() < HConstants.META_VERSION) { + LOG.info("Meta has different version, running migration " + + current.getMetaVersion() + " -> " + HConstants.META_VERSION); + MetaMigration.doMigration(fsd, current, compiledMetaDescriptor); + } else if (current.getMetaVersion() > HConstants.META_VERSION) { + throw new IllegalStateException( + "Meta is more recent than we know about, rebuild it with hbck if needed"); + } + } + + static void doMigration(FSTableDescriptors fsd, HTableDescriptor current, + HTableDescriptor newDescriptor) throws IOException { + switch (current.getMetaVersion()) { + case HConstants.NO_VERSION_OF_META_VERSION: + // currently we only migrate the descriptor, but this could become a full meta migration + // including rewriting internal data structures if needed + fsd.add(newDescriptor); + break; + case 2: + // only new table family was added, so try to preserve descriptor values for regions 
family + HTableDescriptor copy = new HTableDescriptor(current) + .addFamily(newDescriptor.getFamily(HConstants.TABLE_FAMILY)) + .setMetaVersion(HConstants.META_VERSION); + fsd.add(copy); + break; + default: + throw new IllegalArgumentException( + "Should never happen, but if it happens migration code needs to be added here for version " + + current.getMetaVersion()); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index 0d5b27e..99461d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -643,9 +643,8 @@ public abstract class HBaseTestCase extends TestCase { * @throws IOException */ protected void createMetaRegion() throws IOException { - FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf); meta = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, testDir, - conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); + conf, TableDescriptor.metaTableDescriptor(conf)); } protected void closeRootAndMeta() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 9cb0d57c..d693897 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -407,7 +407,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { */ public HTableDescriptor getMetaTableDescriptor() { try { - return new FSTableDescriptors(conf).get(TableName.META_TABLE_NAME); + return TableDescriptor.metaTableDescriptor(conf); } catch (IOException e) { throw new RuntimeException("Unable to create META table descriptor", e); } @@ -1174,6 +1174,7 @@ public class HBaseTestingUtility 
extends HBaseCommonTestingUtility { FSUtils.setRootDir(this.conf, hbaseRootdir); fs.mkdirs(hbaseRootdir); FSUtils.setVersion(fs, hbaseRootdir); + new FSTableDescriptors(conf).createTableDescriptor(TableDescriptor.metaTableDescriptor(conf)); return hbaseRootdir; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java index 982b977..c3184d2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/FilterTestingCluster.java @@ -40,8 +40,6 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; -import org.apache.hadoop.hbase.client.HBaseAdmin; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.ScannerCallable; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.ipc.AbstractRpcClient; @@ -128,7 +126,12 @@ public class FilterTestingCluster { @AfterClass public static void tearDown() throws Exception { deleteTables(); - connection.close(); + if (admin != null) { + admin.close(); + } + if (connection != null) { + connection.close(); + } TEST_UTIL.shutdownMiniCluster(); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java index 0534643..56ac16d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFileSystem.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.master; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static 
org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; import java.util.HashSet; @@ -30,8 +31,11 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SplitLogTask; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.FSUtils; @@ -116,4 +120,24 @@ public class TestMasterFileSystem { zkw.close(); } + @Test + public void testMetaPreserved() throws Exception { + HMaster master = UTIL.getMiniHBaseCluster().getMaster(); + + TableDescriptors tableDescriptors = master.getTableDescriptors(); + HTableDescriptor mtd = tableDescriptors.get(TableName.META_TABLE_NAME); + // forcing parameter to differ from what is expected + mtd.setCompactionEnabled(false); + mtd.setRegionReplication(55); + tableDescriptors.add(mtd); + + UTIL.getHBaseCluster().stopMaster(master.getServerName()); + UTIL.getHBaseCluster().waitForMasterToStop(master.getServerName(), 10000); + HMaster master2 = UTIL.getHBaseCluster().startMaster().getMaster(); + UTIL.getHBaseCluster().waitForActiveAndReadyMaster(10000); + HTableDescriptor mtd2 = master2.getTableDescriptors().get(TableName.META_TABLE_NAME); + assertNotEquals(mtd, mtd2); + assertNotEquals(55, mtd2.getRegionReplication()); + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 110cd36..b6fc54a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; @@ -73,10 +74,11 @@ public class TestGetClosestAtOrBefore extends HBaseTestCase { FileSystem filesystem = FileSystem.get(conf); Path rootdir = testDir; // Up flush size else we bind up when we use default catalog flush of 16k. - fsTableDescriptors.get(TableName.META_TABLE_NAME).setMemStoreFlushSize(64 * 1024 * 1024); + HTableDescriptor metaHtd = fsTableDescriptors.get(TableName.META_TABLE_NAME); + metaHtd.setMemStoreFlushSize(64 * 1024 * 1024); - Region mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, - rootdir, this.conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); + HRegion mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, + rootdir, this.conf, metaHtd); try { // Write rows for three tables 'A', 'B', and 'C'. 
for (char c = 'A'; c < 'D'; c++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java index 5fde726..9172a4d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionInfo.java @@ -32,6 +32,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.TableName; @@ -40,7 +41,6 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.MD5Hash; import org.junit.Assert; import org.junit.Test; @@ -64,14 +64,14 @@ public class TestHRegionInfo { HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO; Path basedir = htu.getDataTestDir(); // Create a region. That'll write the .regioninfo file. - FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); + HTableDescriptor metaHtd = TableDescriptor.metaTableDescriptor(htu.getConfiguration()); HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(), - fsTableDescriptors.get(TableName.META_TABLE_NAME)); + metaHtd); // Get modtime on the file. 
long modtime = getModTime(r); HBaseTestingUtility.closeRegionAndWAL(r); Thread.sleep(1001); - r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), + r = HRegion.openHRegion(basedir, hri, metaHtd, null, htu.getConfiguration()); // Ensure the file is not written for a second time. long modtime2 = getModTime(r); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index c09982e..d8970d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -32,7 +32,6 @@ import java.util.Map; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.hbase.client.TableState; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; @@ -301,7 +300,7 @@ public class TestFSTableDescriptors { htds.createTableDescriptor(htd); } // add hbase:meta - HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName()); + HTableDescriptor htd = TableDescriptor.metaTableDescriptor(UTIL.getConfiguration()); htds.createTableDescriptor(htd); assertEquals("getAll() didn't return all TableDescriptors, expected: " + @@ -336,11 +335,11 @@ public class TestFSTableDescriptors { assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); // add a new entry for hbase:meta - HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName()); + HTableDescriptor htd = TableDescriptor.metaTableDescriptor(UTIL.getConfiguration()); nonchtds.createTableDescriptor(htd); - // hbase:meta will only increase the cachehit by 1 - assertTrue(nonchtds.getAll().size() == chtds.getAll().size()); + // hbase:meta should appear + 
assertEquals(nonchtds.getAll().size(), chtds.getAll().size() + 1); for (Map.Entry entry: nonchtds.getAll().entrySet()) { String t = (String) entry.getKey(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMetaMigration.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMetaMigration.java new file mode 100644 index 0000000..1b47c60 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMetaMigration.java @@ -0,0 +1,98 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.TableDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Before; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import java.io.IOException; + +@Category({MiscTests.class, SmallTests.class}) +public class TestMetaMigration { + + private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private Configuration conf; + + @Before + public void init() throws IOException { + conf = TEST_UTIL.getConfiguration(); + TEST_UTIL.createRootDir(true); + FSTableDescriptors fsd = new FSTableDescriptors(conf); + fsd.deleteTableDescriptorIfExists(TableName.META_TABLE_NAME); + // initialize descriptor + fsd.createTableDescriptor(TableDescriptor.metaTableDescriptor(conf)); + } + + @Test + public void testMigrationCreatesDescriptor() throws IOException { + FSTableDescriptors fsd = new FSTableDescriptors(TEST_UTIL.getConfiguration()); + fsd.deleteTableDescriptorIfExists(TableName.META_TABLE_NAME); + MetaMigration.migrateMeta(fsd); + HTableDescriptor created = fsd.get(TableName.META_TABLE_NAME); + assertNotNull(created); + assertEquals(created.getMetaVersion(), HConstants.META_VERSION); + } + + @Test + public void testMigrationNoVersionToRecent() throws IOException { + FSTableDescriptors fsd = new FSTableDescriptors(TEST_UTIL.getConfiguration()); + + HTableDescriptor cleanHtd = TableDescriptor.metaTableDescriptor(fsd.getConfiguration()); + // make it look like 
some old one + HTableDescriptor changedHtd = new HTableDescriptor(cleanHtd); + changedHtd.remove(HTableDescriptor.META_VERSION_KEY); + changedHtd.removeFamily(HConstants.TABLE_FAMILY); + fsd.add(changedHtd); + MetaMigration.migrateMeta(fsd); + HTableDescriptor noVersionHtdMigrated = fsd.get(TableName.META_TABLE_NAME); + assertNotNull(noVersionHtdMigrated); + assertEquals(HConstants.META_VERSION, noVersionHtdMigrated.getMetaVersion()); + assertEquals(noVersionHtdMigrated.getFamily(HConstants.TABLE_FAMILY), + cleanHtd.getFamily(HConstants.TABLE_FAMILY)); + } + + @Test + public void testDowngradeCheck() throws IOException { + FSTableDescriptors fsd = new FSTableDescriptors(TEST_UTIL.getConfiguration()); + HTableDescriptor newHtd = TableDescriptor.metaTableDescriptor(fsd.getConfiguration()); + newHtd.setMetaVersion(HConstants.META_VERSION + 1); + fsd.add(newHtd); + try { + MetaMigration.migrateMeta(fsd); + fail("Exception should be thrown"); + } catch (IllegalStateException iss) { + assertTrue(iss.getMessage(), iss.getMessage().contains("is more recent")); + } + } + +} \ No newline at end of file