Index: hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java =================================================================== --- hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java (revision 1518396) +++ hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java (working copy) @@ -72,9 +72,19 @@ public static final TableName NAMESPACE_TABLE_NAME = valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace"); - private static final String OLD_META_STR = ".META."; - private static final String OLD_ROOT_STR = "-ROOT-"; + public static final String OLD_META_STR = ".META."; + public static final String OLD_ROOT_STR = "-ROOT-"; + /** + * TableName for old -ROOT- table. It is used to read/process old WALs which have + * ROOT edits. + */ + public static final TableName OLD_ROOT_TABLE_NAME = getADummyTableName(OLD_ROOT_STR); + /** + * TableName for old .META. table. Used in testing. + */ + public static final TableName OLD_META_TABLE_NAME = getADummyTableName(OLD_META_STR); + private byte[] name; private String nameAsString; private byte[] namespace; @@ -231,6 +241,18 @@ return ret; } + /** + * Used to create table names for the old META and ROOT tables.
+ * @return a dummy TableName instance (with no validation) for the passed qualifier + */ + private static TableName getADummyTableName(String qualifier) { + TableName ret = new TableName(); + ret.namespaceAsString = NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR; + ret.qualifierAsString = qualifier; + ret.nameAsString = createFullyQualified(ret.namespaceAsString, ret.qualifierAsString); + ret.name = Bytes.toBytes(qualifier); + return ret; + } public static TableName valueOf(String namespaceAsString, String qualifierAsString) { TableName ret = new TableName(); if(namespaceAsString == null || namespaceAsString.length() < 1) { Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java (revision 1518396) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java (working copy) @@ -35,6 +35,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; @@ -115,7 +116,7 @@ // The first element in the list is the cluster id on which the change has originated private List clusterIds; - + private NavigableMap scopes; private CompressionContext compressionContext; @@ -148,7 +149,7 @@ long logSeqNum, final long now, List clusterIds){ init(encodedRegionName, tablename, logSeqNum, now, clusterIds); } - + protected void init(final byte [] encodedRegionName, final TableName tablename, long logSeqNum, final long now, List clusterIds) { this.logSeqNum = logSeqNum; @@ -254,9 +255,9 @@ /** * Produces a string map for this key. 
Useful for programmatic use and - * manipulation of the data stored in an HLogKey, for example, printing + * manipulation of the data stored in an HLogKey, for example, printing * as JSON. - * + * * @return a Map containing data from this key */ public Map toStringMap() { @@ -375,6 +376,7 @@ // @see Bytes#readByteArray(DataInput) this.scopes = null; // writable HLogKey does not contain scopes int len = WritableUtils.readVInt(in); + byte[] tablenameBytes = null; if (len < 0) { // what we just read was the version version = Version.fromCode(len); @@ -387,12 +389,10 @@ if (compressionContext == null || !version.atLeast(Version.COMPRESSED)) { this.encodedRegionName = new byte[len]; in.readFully(this.encodedRegionName); - byte[] tablenameBytes = Bytes.readByteArray(in); - this.tablename = TableName.valueOf(tablenameBytes); + tablenameBytes = Bytes.readByteArray(in); } else { this.encodedRegionName = Compressor.readCompressed(in, compressionContext.regionDict); - byte[] tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict); - this.tablename = TableName.valueOf(tablenameBytes); + tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict); } this.logSeqNum = in.readLong(); @@ -413,6 +413,19 @@ // Means it's a very old key, just continue } } + try { + this.tablename = TableName.valueOf(tablenameBytes); + } catch (IllegalArgumentException iae) { + if (Bytes.toString(tablenameBytes).equals(TableName.OLD_META_STR)) { + // It is a pre-namespace meta table edit, continue with new format. 
+ LOG.info("Got an old META edit, continuing with new format "); + this.tablename = TableName.META_TABLE_NAME; + this.encodedRegionName = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(); + } else if (Bytes.toString(tablenameBytes).equals(TableName.OLD_ROOT_STR)) { + this.tablename = TableName.OLD_ROOT_TABLE_NAME; + throw iae; + } else throw iae; + } // Do not need to read the clusters information as we are using protobufs from 0.95 } Index: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java (revision 1518396) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ReaderBase.java (working copy) @@ -21,16 +21,20 @@ import java.io.IOException; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALTrailer; import org.apache.hadoop.hbase.util.FSUtils; @InterfaceAudience.Private public abstract class ReaderBase implements HLog.Reader { + private static final Log LOG = LogFactory.getLog(ReaderBase.class); protected Configuration conf; protected FileSystem fs; protected Path path; @@ -95,7 +99,18 @@ e.setCompressionContext(compressionContext); } - boolean hasEntry = readNext(e); + boolean hasEntry = false; + try { + hasEntry = readNext(e); + } catch (IllegalArgumentException iae) { + TableName tableName = e.getKey().getTablename(); + if (tableName != null && tableName.equals(TableName.OLD_ROOT_TABLE_NAME)) { + // It is old ROOT table edit, ignore it + LOG.info("Got an old ROOT edit, ignoring "); + return 
next(e); + } + else throw iae; + } edit++; if (compressionContext != null && emptyCompressionContext) { emptyCompressionContext = false; @@ -103,7 +118,6 @@ return hasEntry ? e : null; } - @Override public void seek(long pos) throws IOException { if (compressionContext != null && emptyCompressionContext) {