From 334de3660d80acdedc4c6e9f44a0d5e3af747989 Mon Sep 17 00:00:00 2001
From: Ben Lau
Date: Wed, 9 Sep 2015 18:10:28 -0700
Subject: [PATCH] Layout Abstraction

---
 .../java/org/apache/hadoop/hbase/HRegionInfo.java  |  28 ++-
 .../org/apache/hadoop/hbase/MetaTableAccessor.java |  15 +-
 .../apache/hadoop/hbase/backup/HFileArchiver.java  |  24 ++-
 .../apache/hadoop/hbase/fs/layout/AFsLayout.java   |  55 +++++
 .../hadoop/hbase/fs/layout/AHFileLinkManager.java  |  73 +++++++
 .../apache/hadoop/hbase/fs/layout/FsLayout.java    | 220 ++++++++++++++++++++
 .../hbase/fs/layout/HierarchicalFsLayout.java      | 171 +++++++++++++++
 .../hbase/fs/layout/StandardHBaseFsLayout.java     | 115 ++++++++++
 .../java/org/apache/hadoop/hbase/io/HFileLink.java |  13 +-
 .../hadoop/hbase/io/hfile/HFilePrettyPrinter.java  |  14 +-
 .../apache/hadoop/hbase/master/CatalogJanitor.java |   5 +-
 .../hadoop/hbase/master/MasterFileSystem.java      |  62 +++++-
 .../apache/hadoop/hbase/master/RegionStates.java   |   3 +-
 .../master/procedure/DeleteTableProcedure.java     |   5 +-
 .../hadoop/hbase/regionserver/CompactionTool.java  |   6 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  |  34 +--
 .../hbase/regionserver/HRegionFileSystem.java      | 220 +++++++++++++++-----
 .../regionserver/HRegionFileSystemFactory.java     |  30 +++
 .../apache/hadoop/hbase/regionserver/HStore.java   |   4 +-
 .../HierarchicalHRegionFileSystem.java             |  48 +++++
 .../HierarchicalHRegionFileSystemFactory.java      |  31 +++
 .../hbase/regionserver/SplitTransactionImpl.java   |  31 ++-
 .../hadoop/hbase/regionserver/StoreFileInfo.java   |  25 ++-
 .../hadoop/hbase/snapshot/ExportSnapshot.java      |   8 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java      |  27 +--
 .../hadoop/hbase/snapshot/SnapshotManifestV1.java  |   7 +-
 .../hadoop/hbase/snapshot/SnapshotManifestV2.java  |   2 +
 .../java/org/apache/hadoop/hbase/util/FSUtils.java |  75 ++-----
 .../org/apache/hadoop/hbase/util/FSVisitor.java    |   8 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java    |  27 ++-
 .../apache/hadoop/hbase/util/HFileArchiveUtil.java |  34 ---
 .../hbase/util/hbck/HFileCorruptionChecker.java    |   9 +-
 .../org/apache/hadoop/hbase/wal/WALSplitter.java   |  15 +-
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |  18 ++
 .../apache/hadoop/hbase/TestHumongousTable.java    | 184 ++++++++++++++++
 .../hadoop/hbase/backup/TestHFileArchiving.java    |  51 +++--
 .../hbase/backup/TestHumongousHFileArchiving.java  |  40 ++++
 .../client/TestRestoreSnapshotFromClient.java      |  15 +-
 .../hadoop/hbase/fs/layout/TestFsLayout.java       | 103 +++++++++
 .../org/apache/hadoop/hbase/io/TestHFileLink.java  |  23 +-
 .../hadoop/hbase/io/TestHumongousHFileLink.java    |  40 ++++
 .../master/TestAssignmentManagerOnCluster.java     |   7 +-
 .../hbase/master/TestDistributedLogSplitting.java  |  52 +++--
 .../TestHumongousTableDistributedLogSplitting.java |  41 ++++
 .../hbase/master/cleaner/TestHFileCleaner.java     |  10 +-
 .../hbase/master/cleaner/TestHFileLinkCleaner.java |  26 ++-
 .../cleaner/TestHumongousHFileLinkCleaner.java     |  40 ++++
 .../master/cleaner/TestSnapshotFromMaster.java     |   8 +-
 .../procedure/MasterProcedureTestingUtility.java   |  31 ++-
 .../procedure/TestAddColumnFamilyProcedure.java    |   5 +-
 .../master/procedure/TestCreateTableProcedure.java |   2 +
 .../procedure/TestDeleteColumnFamilyProcedure.java |  22 +-
 .../TestMasterFailoverWithProcedures.java          |   3 +
 .../master/procedure/TestModifyTableProcedure.java |  20 +-
 .../procedure/TestTruncateTableProcedure.java      |   2 +
 .../regionserver/TestCompactHumongousRegion.java   |  41 ++++
 .../hadoop/hbase/regionserver/TestCompaction.java  |   6 +-
 .../hbase/regionserver/TestHRegionFileSystem.java  |   9 +-
 .../regionserver/TestHumongousRegionMerge.java     |  41 ++++
 ...stHumongousRegionMergeTransactionOnCluster.java |  40 ++++
 .../hbase/regionserver/TestHumongousStoreFile.java |  41 ++++
 .../hbase/regionserver/TestRecoveredEdits.java     |   5 +-
 .../regionserver/TestRegionMergeTransaction.java   |  24 ++-
 .../TestRegionMergeTransactionOnCluster.java       |  16 +-
 ...estSplitHumongousTableTransactionOnCluster.java |  41 ++++
 .../hbase/regionserver/TestSplitTransaction.java   |  29 ++-
 .../TestSplitTransactionForHumongousTable.java     |  41 ++++
 .../TestSplitTransactionOnCluster.java             |  18 +-
 .../hadoop/hbase/regionserver/TestStoreFile.java   |  44 ++--
 .../regionserver/TestStripeStoreFileManager.java   |   5 +-
 .../regionserver/wal/TestHumongousWALReplay.java   |  46 ++++
 .../hbase/regionserver/wal/TestWALReplay.java      |   8 +-
 .../hbase/snapshot/SnapshotTestingUtils.java       |   9 +-
 .../snapshot/TestExportHumongousSnapshot.java      |  45 ++++
 .../hadoop/hbase/snapshot/TestExportSnapshot.java  |   9 +-
 .../TestHumongousRestoreSnapshotHelper.java        |  40 ++++
 .../hbase/snapshot/TestRestoreSnapshotHelper.java  |   2 +-
 .../hadoop/hbase/util/HFileArchiveTestingUtil.java |   9 +-
 .../apache/hadoop/hbase/util/TestFSVisitor.java    |   5 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java    |  59 +++---
 .../hbase/util/TestHBaseFsckHumongousTable.java    |  49 +++++
 .../hadoop/hbase/util/TestHFileArchiveUtil.java    |   4 +-
 .../apache/hadoop/hbase/wal/TestWALFactory.java    |   7 +-
 .../org/apache/hadoop/hbase/wal/TestWALSplit.java  |  49 +++--
 84 files changed, 2418 insertions(+), 511 deletions(-)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AFsLayout.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AHFileLinkManager.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/FsLayout.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/HierarchicalFsLayout.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/StandardHBaseFsLayout.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/TestHumongousTable.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHumongousHFileArchiving.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/fs/layout/TestFsLayout.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHumongousHFileLink.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHumongousTableDistributedLogSplitting.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHumongousHFileLinkCleaner.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactHumongousRegion.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMerge.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMergeTransactionOnCluster.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousStoreFile.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitHumongousTableTransactionOnCluster.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionForHumongousTable.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHumongousWALReplay.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportHumongousSnapshot.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestHumongousRestoreSnapshotHelper.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckHumongousTable.java

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index c134063..1474779 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -27,10 +27,10 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.KeyValue.KVComparator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.master.RegionState;
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.io.DataInputBuffer;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
@@ -236,6 +237,18 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
     this.regionName = createRegionName(tableName, null, regionId, replicaId, false);
     setHashCode();
   }
+
+  /**
+   * This is only meant to be used for tests that require a specific encoded name.
+   * The HRegionInfo is only guaranteed to work for methods that operate on the encoded
+   * name or the table name.
+   */
+  @VisibleForTesting
+  public static HRegionInfo makeTestInfoWithEncodedName(TableName tableName, String encodedName) {
+    HRegionInfo hri = new HRegionInfo(System.currentTimeMillis(), tableName, 0);
+    hri.encodedName = encodedName;
+    return hri;
+  }
 
   public HRegionInfo(final TableName tableName) {
     this(tableName, null, null);
@@ -504,6 +517,19 @@ public class HRegionInfo implements Comparable<HRegionInfo> {
   public static byte[] getStartKey(final byte[] regionName) throws IOException {
     return parseRegionName(regionName)[1];
   }
+
+  /** Returns an HRI parsed from this regionName. Not all the fields of the HRI
+   * are stored in the name, so the returned object should only be used for the fields
+   * in the regionName.
+   */
+  public static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
+      throws IOException {
+    byte[][] fields = HRegionInfo.parseRegionName(regionName);
+    long regionId = Long.parseLong(Bytes.toString(fields[2]));
+    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
+    return new HRegionInfo(
+        TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
+  }
 
   /**
    * Separate elements of a regionName.
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 2fbfd9f..e70825b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -299,7 +299,7 @@ public class MetaTableAccessor {
     byte[] row = regionName;
     HRegionInfo parsedInfo = null;
     try {
-      parsedInfo = parseRegionInfoFromRegionName(regionName);
+      parsedInfo = HRegionInfo.parseRegionInfoFromRegionName(regionName);
       row = getMetaKeyForRegion(parsedInfo);
     } catch (Exception parseEx) {
       // Ignore. This is used with tableName passed as regionName.
@@ -334,19 +334,6 @@ public class MetaTableAccessor {
     return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName();
   }
 
-  /** Returns an HRI parsed from this regionName. Not all the fields of the HRI
-   * is stored in the name, so the returned object should only be used for the fields
-   * in the regionName.
-   */
-  protected static HRegionInfo parseRegionInfoFromRegionName(byte[] regionName)
-      throws IOException {
-    byte[][] fields = HRegionInfo.parseRegionName(regionName);
-    long regionId = Long.parseLong(Bytes.toString(fields[2]));
-    int replicaId = fields.length > 3 ? Integer.parseInt(Bytes.toString(fields[3]), 16) : 0;
-    return new HRegionInfo(
-        TableName.valueOf(fields[0]), fields[1], fields[1], false, regionId, replicaId);
-  }
-
   /**
    * Gets the result in hbase:meta for the specified region.
    * @param connection connection we're using
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index d682ccc..1a0921f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -32,7 +32,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -72,10 +73,20 @@ public class HFileArchiver {
   public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info)
       throws IOException {
     Path rootDir = FSUtils.getRootDir(conf);
-    archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()),
-      HRegion.getRegionDir(rootDir, info));
+    Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());
+    Path regionDir = FsLayout.getRegionDir(tableDir, info);
+    archiveRegion(fs, rootDir, tableDir, regionDir);
+  }
+
+  public static Path getRegionArchiveDir(Path rootdir, TableName tableName, Path regionDir) {
+    return FsLayout.getRegionArchiveDir(rootdir, tableName, regionDir);
   }
 
+  public static void archiveRegion(FileSystem fs, Path rootdir, Path tableDir, HRegionInfo hri)
+      throws IOException {
+    archiveRegion(fs, rootdir, tableDir, FsLayout.getRegionDir(tableDir, hri));
+  }
+
   /**
    * Remove an entire region from the table directory via archiving the region's hfiles.
    * @param fs {@link FileSystem} from which to remove the region
@@ -106,9 +117,7 @@ public class HFileArchiver {
     // make sure the regiondir lives under the tabledir
     Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
-    Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir,
-      FSUtils.getTableName(tableDir),
-      regionDir.getName());
+    Path regionArchiveDir = getRegionArchiveDir(rootdir, FSUtils.getTableName(tableDir), regionDir);
 
     FileStatusConverter getAsFile = new FileStatusConverter(fs);
     // otherwise, we attempt to archive the store files
@@ -161,7 +170,8 @@ public class HFileArchiver {
    */
   public static void archiveFamily(FileSystem fs, Configuration conf,
       HRegionInfo parent, Path tableDir, byte[] family) throws IOException {
-    Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family)));
+    Path regionDir = FsLayout.getRegionDir(tableDir, parent);
+    Path familyDir = new Path(regionDir, Bytes.toString(family));
     FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir);
     if (storeFiles == null) {
       LOG.debug("No store files to dispose for region=" + parent.getRegionNameAsString() +
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AFsLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AFsLayout.java
new file mode 100644
index 0000000..8fd651f
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AFsLayout.java
@@ -0,0 +1,55 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.layout;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystemFactory;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.FSUtils;
+
+public abstract class AFsLayout {
+  protected final static Log LOG = LogFactory.getLog(AFsLayout.class);
+
+  public abstract HRegionFileSystemFactory getHRegionFileSystemFactory();
+
+  public abstract Path makeHFileLinkPath(SnapshotManifest snapshotManifest, HRegionInfo regionInfo, String familyName, String hfileName);
+
+  public abstract Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir);
+
+  public abstract Path getTableDirFromRegionDir(Path regionDir);
+
+  public abstract List<Path> getRegionDirPaths(FileSystem fs, Path tableDir) throws IOException;
+
+  public abstract List<FileStatus> getRegionDirFileStats(FileSystem fs, Path tableDir, FSUtils.RegionDirFilter filter) throws IOException;
+
+  public abstract Path getRegionDir(Path tableDir, String name);
+
+  protected AFsLayout() {
+    super();
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AHFileLinkManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AHFileLinkManager.java
new file mode 100644
index 0000000..0eb2489
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/AHFileLinkManager.java
@@ -0,0 +1,73 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.layout;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.util.Pair;
+
+public abstract class AHFileLinkManager {
+  public abstract HFileLink buildFromHFileLinkPattern(Configuration conf, Path hFileLinkPattern) throws IOException;
+
+  public abstract HFileLink buildFromHFileLinkPattern(final Path rootDir, final Path archiveDir, final Path hFileLinkPattern);
+
+  public abstract Path createPath(final TableName table, final String region, final String family, final String hfile);
+
+  public abstract HFileLink build(final Configuration conf, final TableName table, final String region, final String family, final String hfile)
+      throws IOException;
+
+  public abstract boolean isHFileLink(final Path path);
+
+  public abstract boolean isHFileLink(String fileName);
+
+  public abstract String getReferencedHFileName(final String fileName);
+
+  public abstract Path getHFileLinkPatternRelativePath(Path path);
+
+  public abstract String getReferencedRegionName(final String fileName);
+
+  public abstract TableName getReferencedTableName(final String fileName);
+
+  public abstract String createHFileLinkName(final HRegionInfo hfileRegionInfo, final String hfileName);
+
+  public abstract String createHFileLinkName(final TableName tableName, final String regionName, final String hfileName);
+
+  public abstract boolean create(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final HRegionInfo hfileRegionInfo, final String hfileName)
+      throws IOException;
+
+  public abstract boolean createFromHFileLink(final Configuration conf, final FileSystem fs, final Path dstFamilyPath, final String hfileLinkName) throws IOException;
+
+  public abstract String createBackReferenceName(final String tableNameStr, final String regionName);
+
+  public abstract Path getHFileFromBackReference(final Path rootDir, final Path linkRefPath);
+
+  public abstract Pair<TableName, String> parseBackReferenceName(String name);
+
+  public abstract Path getHFileFromBackReference(final Configuration conf, final Path linkRefPath) throws IOException;
+
+  public AHFileLinkManager() {
+    super();
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/FsLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/FsLayout.java
new file mode 100644
index 0000000..c97d2a0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/FsLayout.java
@@ -0,0 +1,220 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.layout;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystemFactory;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Throwables;
+import com.google.common.primitives.Ints;
+
+import edu.umd.cs.findbugs.annotations.NonNull;
+import edu.umd.cs.findbugs.annotations.Nullable;
+
+/**
+ * This class exists mostly to allow us to access the layouts statically for convenience, as
+ * though the layout classes were util classes.
+ */
+public class FsLayout {
+  private static final Log LOG = LogFactory.getLog(FsLayout.class);
+
+  public static final String FS_LAYOUT_CHOICE = "hbase.fs.layout.choose";
+  public static final String FS_LAYOUT_DETECT = "hbase.fs.layout.detect";
+  public static final String FS_LAYOUT_DETECT_STRICT = "hbase.fs.layout.detect.strict";
+  public static final String FS_LAYOUT_FILE_NAME = ".fslayout";
+
+  // TODO: How can we avoid having a volatile variable (slightly slower reads)?
+  // TODO: Move FsLayout class/contents into FSUtils?
+  private static volatile AFsLayout fsLayout = null;
+
+  public static final PathFilter FS_LAYOUT_PATHFILTER = new PathFilter() {
+    @Override
+    public boolean accept(Path p) {
+      return p.getName().equals(FS_LAYOUT_FILE_NAME);
+    }
+  };
+
+  @VisibleForTesting
+  static AFsLayout getRaw() {
+    return fsLayout;
+  }
+
+  public static AFsLayout get() {
+    AFsLayout curLayout = fsLayout;
+    if (curLayout == null) {
+      return initialize(null);
+    } else {
+      return curLayout;
+    }
+  }
+
+  @VisibleForTesting
+  public static void reset() {
+    LOG.debug("Resetting FS layout to null");
+    fsLayout = null;
+  }
+
+  @VisibleForTesting
+  public static void setLayoutForTesting(@NonNull AFsLayout inputLayout) {
+    LOG.debug("Setting FS layout to: " + inputLayout.getClass().getSimpleName());
+    fsLayout = inputLayout;
+  }
+
+  /*
+   * TODO: Should this be required to be called manually?
+   * Maybe call it manually in some processes (master/regionserver) and automatically everywhere else
+   */
+  @VisibleForTesting
+  static synchronized AFsLayout initialize(Configuration conf) {
+    try {
+      if (fsLayout != null) {
+        LOG.debug("Already initialized FS layout, not going to re-initialize");
+        return fsLayout;
+      }
+      if (conf == null) {
+        conf = HBaseConfiguration.create();
+      }
+      String choice = conf.get(FS_LAYOUT_CHOICE, null);
+      boolean autodetect = conf.getBoolean(FS_LAYOUT_DETECT, false);
+      if (choice != null && autodetect) {
+        throw new IllegalStateException("Configuration both chooses a layout and "
+            + "tries to automatically detect the layout");
+      }
+      if (choice != null) {
+        Class<?> layoutClass = Class.forName(choice);
+        Method getMethod = layoutClass.getMethod("get");
+        return (AFsLayout) getMethod.invoke(null);
+      }
+      if (autodetect) {
+        LOG.debug("Trying to detect hbase layout on filesystem");
+        FileSystem fs = FSUtils.getCurrentFileSystem(conf);
+        Path rootDir = FSUtils.getRootDir(conf);
+        AFsLayout fsLayoutFromFile = readLayoutFile(fs, rootDir);
+        if (fsLayoutFromFile == null) {
+          if (conf.getBoolean(FS_LAYOUT_DETECT_STRICT, false)) {
+            throw new IllegalStateException("Tried to detect fs layout, but there was no layout file at the root!");
+          } else {
+            LOG.debug("Didn't find a layout file, assuming classical hbase fs layout");
+            fsLayout = StandardHBaseFsLayout.get();
+          }
+        } else {
+          LOG.info("Detected hbase fs layout: " + fsLayoutFromFile.getClass().getSimpleName());
+          fsLayout = fsLayoutFromFile;
+        }
+      } else {
+        fsLayout = StandardHBaseFsLayout.get();
+      }
+    } catch (Exception e) {
+      Throwables.propagate(e);
+    }
+    return fsLayout;
+  }
+
+  public static AFsLayout readLayoutFile(FileSystem fs, Path rootDir)
+      throws FileNotFoundException, IOException, ClassNotFoundException,
+      NoSuchMethodException, SecurityException, IllegalAccessException,
+      IllegalArgumentException, InvocationTargetException {
+    Path layoutFilePath = new Path(rootDir, FS_LAYOUT_FILE_NAME);
+    FileStatus[] statuses = fs.listStatus(rootDir, FS_LAYOUT_PATHFILTER);
+    if (statuses.length != 1) {
+      return null;
+    }
+    FileStatus stat = statuses[0];
+    int len = Ints.checkedCast(stat.getLen());
+    byte[] inputStreamBytes = new byte[len];
+    FSDataInputStream inputStream = fs.open(layoutFilePath);
+    inputStream.readFully(inputStreamBytes);
+    inputStream.close();
+    String layoutClassName = Bytes.toString(inputStreamBytes);
+    Class<?> layoutClass = Class.forName(layoutClassName);
+    Method getMethod = layoutClass.getMethod("get");
+    return (AFsLayout) getMethod.invoke(null);
+  }
+
+  public static void writeLayoutFile(FileSystem fs, Path rootDir, AFsLayout fsLayout, boolean overwrite)
+      throws IOException {
+    Path layoutFilePath = new Path(rootDir, FS_LAYOUT_FILE_NAME);
+    FSDataOutputStream outputStream = fs.create(layoutFilePath, overwrite);
+    try {
+      outputStream.write(Bytes.toBytes(fsLayout.getClass().getCanonicalName()));
+    } finally {
+      outputStream.close();
+    }
+  }
+
+  public static boolean deleteLayoutFile(FileSystem fs, Path rootDir) throws IOException {
+    Path layoutFilePath = new Path(rootDir, FS_LAYOUT_FILE_NAME);
+    return fs.delete(layoutFilePath, false);
+  }
+
+  public static HRegionFileSystemFactory getHRegionFileSystemFactory() {
+    return get().getHRegionFileSystemFactory();
+  }
+
+  public static Path getRegionDir(Path tableDir, HRegionInfo regionInfo) {
+    return getRegionDir(tableDir, regionInfo.getEncodedName());
+  }
+
+  public static Path getRegionDir(Path tableDir, String name) {
+    return get().getRegionDir(tableDir, name);
+  }
+
+  @Nullable
+  public static List<FileStatus> getRegionDirFileStats(FileSystem fs, Path tableDir, RegionDirFilter filter) throws IOException {
+    return get().getRegionDirFileStats(fs, tableDir, filter);
+  }
+
+  public static List<Path> getRegionDirPaths(FileSystem fs, Path tableDir) throws IOException {
+    return get().getRegionDirPaths(fs, tableDir);
+  }
+
+  public static Path getTableDirFromRegionDir(Path regionDir) {
+    return get().getTableDirFromRegionDir(regionDir);
+  }
+
+  public static Path getRegionArchiveDir(Path rootDir, TableName tableName, Path regiondir) {
+    return get().getRegionArchiveDir(rootDir, tableName, regiondir);
+  }
+
+  public static Path makeHFileLinkPath(SnapshotManifest snapshotManifest, HRegionInfo regionInfo, String familyName, String hfileName) {
+    return get().makeHFileLinkPath(snapshotManifest, regionInfo, familyName, hfileName);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/HierarchicalFsLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/HierarchicalFsLayout.java
new file mode 100644
index 0000000..d17428a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/HierarchicalFsLayout.java
@@ -0,0 +1,171 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.layout;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
+import org.apache.hadoop.hbase.regionserver.HierarchicalHRegionFileSystemFactory;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+
+
+public class HierarchicalFsLayout extends AFsLayout {
+  private static final String OLD_REGION_NAME_PADDING = "abcdef1234abcdef1234abcdef1234ab";
+  private static final HierarchicalFsLayout LAYOUT = new HierarchicalFsLayout();
+
+  static {
+    assert OLD_REGION_NAME_PADDING.length() == 32;
+  }
+
+  public static HierarchicalFsLayout get() { return LAYOUT; }
+
+  private HierarchicalFsLayout() { }
+
+  @Override
+  public HierarchicalHRegionFileSystemFactory getHRegionFileSystemFactory() {
+    return new HierarchicalHRegionFileSystemFactory();
+  }
+
+  @Override
+  public Path getRegionDir(Path tableDir, String name) {
+    return getHumongousRegionDir(tableDir, name);
+  }
+
+  private Path getHumongousRegionDir(final Path tabledir, final String name) {
+    if (name.length() != HRegionInfo.MD5_HEX_LENGTH) {
+      String table = tabledir.getName();
+      String namespace = tabledir.getParent().getName();
+
+      // Meta and old root table use the old encoded name format still
+      if (!namespace.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
+        throw new IllegalArgumentException("The region with encoded name " + name
+            + " is not a humongous region, cannot get humongous region dir from it.");
+      }
+      if (!table.equals(TableName.META_TABLE_NAME.getQualifierAsString()) &&
+          !table.equals(TableName.OLD_ROOT_TABLE_NAME.getQualifierAsString())) {
+        throw new IllegalArgumentException("The region with encoded name " + name
+            + " is not a humongous region, cannot get humongous region dir from it.");
+      }
+
+      // Add padding to guarantee we will have enough characters
+      return new Path(new Path(tabledir, makeBucketName(name, OLD_REGION_NAME_PADDING)), name);
+    }
+    return new Path(new Path(tabledir, makeBucketName(name, null)), name);
+  }
+
+  private String makeBucketName(String regionName, String padding) {
+    if (padding != null) {
+      regionName = regionName + padding;
+    }
+    return regionName.substring(HRegionInfo.MD5_HEX_LENGTH
+        - HRegionFileSystem.HUMONGOUS_DIR_NAME_SIZE);
+  }
+
+  @Override
+  public List<FileStatus> getRegionDirFileStats(FileSystem fs, Path tableDir, RegionDirFilter filter)
+      throws IOException {
+    FileStatus[] buckets = FSUtils.listStatus(fs, tableDir);
+    if (buckets == null) {
+      return null;
+    }
+    List<FileStatus> stats = new ArrayList<FileStatus>();
+    for (FileStatus bucket : buckets) {
+      FileStatus[] regionDirs = null;
+      if (filter != null) {
+        regionDirs = fs.listStatus(bucket.getPath(), filter);
+      } else {
+        regionDirs = fs.listStatus(bucket.getPath());
+      }
+      for (FileStatus regionDir : regionDirs) {
+        stats.add(regionDir);
+      }
+    }
+    if (stats.size() == 0) {
+      return null;
+    }
+    return stats;
+  }
+
+  /**
+   * Given a particular table dir, return all the regiondirs inside it, excluding files such as
+   * .tableinfo
+   * @param fs A file system for the Path
+   * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
+   * @return List of paths to valid region directories in table dir.
+   * @throws IOException
+   */
+  @Override
+  public List<Path> getRegionDirPaths(final FileSystem fs, final Path tableDir) throws IOException {
+    // assumes we are in a table dir.
+    FileStatus[] rds = fs.listStatus(tableDir, new FSUtils.RegionDirFilter(fs));
+    List<Path> regionDirs = new ArrayList<Path>();
+    for (FileStatus rdfs : rds) {
+      // get all region dirs from bucket dir
+      FileStatus[] bucket_rds = fs.listStatus(rdfs.getPath(),
+          new FSUtils.RegionDirFilter(fs));
+      for (FileStatus bucket_rdfs : bucket_rds) {
+        regionDirs.add(bucket_rdfs.getPath());
+      }
+    }
+    return regionDirs;
+  }
+
+  @Override
+  public Path getTableDirFromRegionDir(Path regionDir) {
+    return regionDir.getParent().getParent();
+  }
+
+  /**
+   * Get the archive directory for a given region under the specified table
+   * @param tableName the table name. Cannot be null.
+   * @param regiondir the path to the region directory. Cannot be null.
+   * @return {@link Path} to the directory to archive the given region, or null if it
+   *     should not be archived
+   */
+  @Override
+  public Path getRegionArchiveDir(Path rootDir,
+      TableName tableName,
+      Path regiondir) {
+    // get the archive directory for a table
+    Path archiveDir = HFileArchiveUtil.getTableArchivePath(rootDir, tableName);
+
+    // then add on the region path under the archive
+    String encodedRegionName = regiondir.getName();
+    String parentName = regiondir.getParent().getName();
+
+    return new Path(archiveDir, new Path(parentName, encodedRegionName));
+  }
+
+  @Override
+  public Path makeHFileLinkPath(SnapshotManifest snapshotManifest, HRegionInfo regionInfo, String familyName, String hfileName) {
+    return new Path(new Path(getHumongousRegionDir(snapshotManifest.getSnapshotDir(),
+        regionInfo.getEncodedName()), familyName), hfileName);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/StandardHBaseFsLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/StandardHBaseFsLayout.java
new file mode 100644
index 0000000..98edb78
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/layout/StandardHBaseFsLayout.java
@@ -0,0 +1,115 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.layout;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystemFactory;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+
+
+public class StandardHBaseFsLayout extends AFsLayout {
+  private static final StandardHBaseFsLayout LAYOUT = new StandardHBaseFsLayout();
+
+  public static StandardHBaseFsLayout get() { return LAYOUT; }
+
+  private StandardHBaseFsLayout() { }
+
+  @Override
+  public HRegionFileSystemFactory getHRegionFileSystemFactory() {
+    return new HRegionFileSystemFactory();
+  }
+
+  @Override
+  public Path getRegionDir(Path tableDir, String name) {
+    return new Path(tableDir, name);
+  }
+
+  @Override
+  public List<FileStatus> getRegionDirFileStats(FileSystem fs, Path tableDir, RegionDirFilter filter)
+      throws IOException {
+    FileStatus[] rds = FSUtils.listStatus(fs, tableDir, filter);
+    if (rds == null) {
+      return null;
+    }
+    List<FileStatus> regionStatus = new ArrayList<FileStatus>(rds.length);
+    for (FileStatus rdfs : rds) {
+      regionStatus.add(rdfs);
+    }
+    return regionStatus;
+  }
+
+  /**
+   * Given a particular table dir, return all the regiondirs inside it, excluding files such as
+   * .tableinfo
+   * @param fs A file system for the Path
+   * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir>
+   * @return List of paths to valid region directories in table dir.
+   * @throws IOException
+   */
+  @Override
+  public List<Path> getRegionDirPaths(FileSystem fs, Path tableDir) throws IOException {
+    // assumes we are in a table dir.
+    FileStatus[] rds = fs.listStatus(tableDir, new FSUtils.RegionDirFilter(fs));
+    List<Path> regionDirs = new ArrayList<Path>();
+    for (FileStatus rdfs : rds) {
+      regionDirs.add(rdfs.getPath());
+    }
+    return regionDirs;
+  }
+
+  @Override
+  public Path getTableDirFromRegionDir(Path regionDir) {
+    return regionDir.getParent();
+  }
+
+  /**
+   * Get the archive directory for a given region under the specified table
+   * @param tableName the table name. Cannot be null.
+   * @param regiondir the path to the region directory. Cannot be null.
+   * @return {@link Path} to the directory to archive the given region, or null if it
+   *     should not be archived
+   */
+  @Override
+  public Path getRegionArchiveDir(Path rootDir,
+      TableName tableName,
+      Path regiondir) {
+    // get the archive directory for a table
+    Path archiveDir = HFileArchiveUtil.getTableArchivePath(rootDir, tableName);
+    // then add on the region path under the archive
+    String encodedRegionName = regiondir.getName();
+    return new Path(archiveDir, encodedRegionName);
+  }
+
+  @Override
+  public Path makeHFileLinkPath(SnapshotManifest snapshotManifest, HRegionInfo regionInfo, String familyName, String hfileName) {
+    return new Path(new Path(new Path(snapshotManifest.getSnapshotDir(),
+        regionInfo.getEncodedName()), familyName), hfileName);
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
index c17720c..4247f49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HFileLink.java
@@ -25,13 +25,13 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
@@ -216,8 +216,9 @@ public class HFileLink extends FileLink {
     String hfileName = m.group(4);
     String familyName = path.getParent().getName();
     Path tableDir = FSUtils.getTableDir(new Path("./"), tableName);
-    return new Path(tableDir, new Path(regionName, new Path(familyName,
-        hfileName)));
+    Path regionDir = FsLayout.getRegionDir(tableDir, regionName);
+    return new Path(regionDir, new Path(familyName,
+        hfileName));
   }
 
   /**
@@ -333,7 +334,7 @@ public class HFileLink extends FileLink {
       final String hfileName) throws IOException {
     String familyName = dstFamilyPath.getName();
     String regionName = dstFamilyPath.getParent().getName();
-    String tableName = FSUtils.getTableName(dstFamilyPath.getParent().getParent())
+    String tableName = FSUtils.getTableName(FsLayout.getTableDirFromRegionDir(dstFamilyPath.getParent()))
         .getNameAsString();
     String name = createHFileLinkName(linkedTable, linkedRegion, hfileName);
 
@@ -410,12 +411,12 @@ public class HFileLink extends FileLink {
     String hfileName = getBackReferenceFileName(linkRefPath.getParent());
     Path familyPath = linkRefPath.getParent().getParent();
     Path regionPath = familyPath.getParent();
-    Path tablePath = regionPath.getParent();
+    Path tablePath = FsLayout.getTableDirFromRegionDir(regionPath);
     String linkName = createHFileLinkName(FSUtils.getTableName(tablePath),
         regionPath.getName(), hfileName);
     Path linkTableDir = FSUtils.getTableDir(rootDir, linkTableName);
-    Path regionDir = HRegion.getRegionDir(linkTableDir, linkRegionName);
+    Path regionDir = FsLayout.getRegionDir(linkTableDir, linkRegionName);
     return new Path(new Path(regionDir, familyPath.getName()), linkName);
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index cb7dc62..757ad1f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -57,11 +58,13 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.util.BloomFilter;
 import org.apache.hadoop.hbase.util.BloomFilterUtil;
 import org.apache.hadoop.hbase.util.BloomFilterFactory;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Writables;
 import org.apache.hadoop.util.Tool;
@@ -177,14 +180,15 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     if (cmd.hasOption("r")) {
       String regionName = cmd.getOptionValue("r");
       byte[] rn = Bytes.toBytes(regionName);
-      byte[][] hri = HRegionInfo.parseRegionName(rn);
+      HRegionInfo hri = HRegionInfo.parseRegionInfoFromRegionName(rn);
       Path rootDir = FSUtils.getRootDir(getConf());
-      Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf(hri[0]));
-      String enc = HRegionInfo.encodeRegionName(rn);
-      Path regionDir = new Path(tableDir, enc);
+      Path tableDir = FSUtils.getTableDir(rootDir, TableName.valueOf(
+          hri.getTable().getNameAsString()));
+      FileSystem fs = FileSystem.get(getConf());
+      Path regionDir = HRegionFileSystem.create(getConf(), fs, tableDir, hri).getRegionDir();
       if (verbose)
         System.out.println("region dir -> " + regionDir);
-      List<Path> regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()),
+      List<Path> regionFiles = HFile.getStoreFiles(fs,
           regionDir);
       if (verbose)
         System.out.println("Number of region files found -> "
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index e9fca27..9d78c1b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -367,7 +368,9 @@ public class CatalogJanitor extends ScheduledChore {
 
     Path rootdir = this.services.getMasterFileSystem().getRootDir();
     Path tabledir = FSUtils.getTableDir(rootdir, daughter.getTable());
-    Path daughterRegionDir = new Path(tabledir, daughter.getEncodedName());
+    HRegionFileSystem hrfs = HRegionFileSystem.create(
+        this.services.getConfiguration(), fs, tabledir, daughter);
+    Path daughterRegionDir = hrfs.getRegionDir();
 
     HRegionFileSystem regionFs = null;
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 3718a5a..24f3071 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -30,6 +30,7 @@ import java.util.concurrent.locks.ReentrantLock;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptor;
@@ -49,8 +51,10 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -482,7 +486,7 @@ public class MasterFileSystem {
       clusterId = FSUtils.getClusterId(fs, rd);
 
       // Make sure the meta region directory exists!
-      if (!FSUtils.metaRegionExists(fs, rd)) {
+      if (!metaRegionExists(fs, rd)) {
         bootstrap(rd, c);
       } else {
         // Migrate table descriptor files if necessary
@@ -500,6 +504,21 @@ public class MasterFileSystem {
 
     return rd;
   }
+
+  /**
+   * Checks if meta region exists
+   *
+   * @param fs file system
+   * @param rootdir root directory of HBase installation
+   * @return true if exists
+   * @throws IOException e
+   */
+  public static boolean metaRegionExists(FileSystem fs, Path rootdir)
+      throws IOException {
+    Path tableDir = FSUtils.getTableDir(rootdir, TableName.META_TABLE_NAME);
+    Path metaRegionDir = FsLayout.getRegionDir(tableDir, HRegionInfo.FIRST_META_REGIONINFO);
+    return fs.exists(metaRegionDir);
+  }
 
   /**
    * Make sure the hbase temp directory exists and is empty.
@@ -512,7 +531,7 @@ public class MasterFileSystem {
       // Archive table in temp, maybe left over from failed deletion,
       // if not the cleaner will take care of them.
       for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
-        for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
+        for (Path regiondir: FsLayout.getRegionDirPaths(fs, tabledir)) {
           HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir);
         }
       }
@@ -604,9 +623,9 @@ public class MasterFileSystem {
     Path tableDir = FSUtils.getTableDir(rootdir, region.getTable());
     HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName);
 
-    // delete the family folder
-    Path familyDir = new Path(tableDir,
-      new Path(region.getEncodedName(), Bytes.toString(familyName)));
+    Path regionDir = FsLayout.getRegionDir(tableDir, region);
+    Path familyDir = new Path(regionDir, Bytes.toString(familyName));
+
     if (fs.delete(familyDir, true) == false) {
       if (fs.exists(familyDir)) {
         throw new IOException("Could not delete family "
@@ -683,7 +702,38 @@ public class MasterFileSystem {
     this.services.getTableDescriptors().add(htd);
     return htd;
   }
-
+
+  // TODO: Can't get rid of this in totality because the caller of this method
+  // (TestSplitTransactionOnCluster.testSSHCleanupDaughterRegionsOfABortedSplit)
+  // is testing something where the FS does not agree with the meta
+  // At best can just make an assertion about # of region dirs in MFS and not expose
+  // what they actually are
+  public List<Path> getRegionDirs(TableName tableName) throws IOException {
+    FileSystem fs = getFileSystem();
+    Path rootDir = FSUtils.getRootDir(conf);
+    Path tableDir = FSUtils.getTableDir(rootDir, tableName);
+    return FsLayout.getRegionDirPaths(fs, tableDir);
+  }
+
+  // Only returns region filesystems for regions in meta
+  // Will ignore anything on filesystem
+  public List<HRegionFileSystem> getRegionFileSystems(Configuration conf,
+      Connection connection, TableName tableName) throws IOException {
+
+    FileSystem fs = getFileSystem();
+    Path rootDir = FSUtils.getRootDir(conf);
+    Path tableDir = FSUtils.getTableDir(rootDir, tableName);
+
+    List<HRegionInfo> regionInfos = MetaTableAccessor.getTableRegions(connection, tableName);
+
+    List<HRegionFileSystem> results = new ArrayList<HRegionFileSystem>();
+    for (HRegionInfo regionInfo : regionInfos) {
+      HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, regionInfo);
+      results.add(hrfs);
+    }
+    return results;
+  }
+
   /**
    * The function is used in SSH to set recovery mode based on configuration after all outstanding
    * log split tasks drained.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 658a879..bd672dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
@@ -645,7 +646,7 @@ public class RegionStates {
       if (MetaTableAccessor.getRegion(server.getConnection(),
           hri.getEncodedNameAsBytes()) == null) {
         regionOffline(hri);
-        FSUtils.deleteRegionDir(server.getConfiguration(), hri);
+        HRegionFileSystem.deleteRegionDir(server.getConfiguration(), hri);
       }
     } catch (IOException e) {
       LOG.warn("Got exception while deleting " + hri + " directories from file system.", e);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index 60212e8..e1e088e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -44,7 +44,7 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
-import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -336,8 +336,7 @@ public class DeleteTableProcedure
     if (archive) {
       for (HRegionInfo hri : regions) {
         LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
-        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
-          tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
+        HFileArchiver.archiveRegion(fs, mfs.getRootDir(), tempTableDir, hri);
       }
       LOG.debug("Table '" + tableName + "' archived!");
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index 0f7e42a..9d54b00 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -28,6 +28,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
@@ -131,7 +132,8 @@ public class CompactionTool extends Configured implements Tool {
     private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
         throws IOException {
       TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-      for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
+      // TODO: TableFileSystem??
+      for (Path regionDir: FsLayout.getRegionDirPaths(fs, tableDir)) {
         compactRegion(tableDir, htd.getHTableDescriptor(), regionDir, compactOnce, major);
       }
     }
@@ -312,7 +314,7 @@ public class CompactionTool extends Configured implements Tool {
       }
     } else if (isTableDir(fs, compactDir)) {
       // Lookup regions
-      for (Path regionDir: FSUtils.getRegionDirs(fs, compactDir)) {
+      for (Path regionDir: FsLayout.getRegionDirPaths(fs, compactDir)) {
        for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
          storeDirs.add(familyDir);
        }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7a69e32..b33c853 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -613,7 +613,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   public HRegion(final Path tableDir, final WAL wal, final FileSystem fs,
       final Configuration confParam, final HRegionInfo regionInfo,
       final HTableDescriptor htd, final RegionServerServices rsServices) {
-    this(new HRegionFileSystem(confParam, fs, tableDir, regionInfo),
+    this(HRegionFileSystem.create(confParam, fs, tableDir, regionInfo),
         wal, confParam, htd, rsServices);
   }
 
@@ -1041,7 +1041,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
     FileSystem fs = tablePath.getFileSystem(conf);
 
-    HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tablePath, regionInfo);
+    HRegionFileSystem regionFs = HRegionFileSystem.create(conf, fs, tablePath, regionInfo);
     for (HColumnDescriptor family: tableDescriptor.getFamilies()) {
       Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family.getNameAsString());
       if (storeFiles == null) continue;
@@ -6248,35 +6248,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           Bytes.toBytes(HConstants.META_VERSION)));
     meta.put(row, HConstants.CATALOG_FAMILY, cells);
   }
-
-  /**
-   * Computes the Path of the HRegion
-   *
-   * @param tabledir qualified path for table
-   * @param name ENCODED region name
-   * @return Path of HRegion directory
-   * @deprecated For tests only; to be removed.
- */ - @Deprecated - @VisibleForTesting - public static Path getRegionDir(final Path rootdir, final HRegionInfo info) { - return new Path( - FSUtils.getTableDir(rootdir, info.getTable()), info.getEncodedName()); - } - + /** * Determines if the specified row is within the row range specified by the * specified HRegionInfo diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index f4eaaf9..9aeb5cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -48,12 +48,15 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSHDFSUtils; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil; +import com.google.common.annotations.VisibleForTesting; + /** * View to an on-disk Region. * Provides the set of methods necessary to interact with the on-disk region data. @@ -72,32 +75,39 @@ public class HRegionFileSystem { public static final String REGION_SPLITS_DIR = ".splits"; /** Temporary subdirectory of the region directory used for compaction output. */ - private static final String REGION_TEMP_DIR = ".tmp"; + protected static final String REGION_TEMP_DIR = ".tmp"; - private final HRegionInfo regionInfo; + protected final HRegionInfo regionInfo; //regionInfo for interacting with FS (getting encodedName, etc) - private final HRegionInfo regionInfoForFs; - private final Configuration conf; - private final Path tableDir; - private final FileSystem fs; + protected final HRegionInfo regionInfoForFs; + protected final Configuration conf; + protected final Path tableDir; + protected final FileSystem fs; + + /** Number of characters for DIR name, 4 characters for 16^4 = 65536 buckets. */ + public static final int HUMONGOUS_DIR_NAME_SIZE = 4; /** * In order to handle NN connectivity hiccups, one need to retry non-idempotent operation at the * client level. */ - private final int hdfsClientRetriesNumber; - private final int baseSleepBeforeRetries; - private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10; - private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000; + protected final int hdfsClientRetriesNumber; + protected final int baseSleepBeforeRetries; + protected static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10; + protected static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000; /** + * Use the static factory methods on this class for construction, unless you are an + * HRegionFileSystem subclass constructor or the HRegionFileSystemFactory. 
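The HUMONGOUS_DIR_NAME_SIZE constant above implies 16^4 = 65536 possible buckets of four hex characters. How HierarchicalFsLayout actually slices the encoded region name into a bucket is not shown in this hunk, so the following is only a sketch under that assumption, with an invented encoded name:

public class BucketSketch {
  static final int HUMONGOUS_DIR_NAME_SIZE = 4; // mirrors HRegionFileSystem

  // Hypothetical bucket derivation: take the trailing hex characters of the
  // MD5-based encoded region name. The exact slice is an assumption here, not
  // confirmed by this patch hunk.
  static String bucketFor(String encodedRegionName) {
    return encodedRegionName.substring(encodedRegionName.length() - HUMONGOUS_DIR_NAME_SIZE);
  }

  public static void main(String[] args) {
    System.out.println(bucketFor("9d36fe1e51309fbd84a97f464fd62fbf")); // prints "2fbf"
  }
}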
+ * * Create a view to the on-disk region + * * @param conf the {@link Configuration} to use * @param fs {@link FileSystem} that contains the region * @param tableDir {@link Path} to where the table is being stored * @param regionInfo {@link HRegionInfo} for region */ - HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir, + protected HRegionFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) { this.fs = fs; this.conf = conf; @@ -110,6 +120,11 @@ public class HRegionFileSystem { DEFAULT_BASE_SLEEP_BEFORE_RETRIES); } + public static HRegionFileSystem create(final Configuration conf, final FileSystem fs, final Path tableDir, + final HRegionInfo regionInfo) { + return FsLayout.getHRegionFileSystemFactory().create(conf, fs, tableDir, regionInfo); + } + /** @return the underlying {@link FileSystem} */ public FileSystem getFileSystem() { return this.fs; @@ -131,9 +146,14 @@ public class HRegionFileSystem { /** @return {@link Path} to the region directory. */ public Path getRegionDir() { - return new Path(this.tableDir, this.regionInfoForFs.getEncodedName()); + return FsLayout.getRegionDir(this.tableDir, this.regionInfoForFs); } - + + /** @return {@link Path} to the daughter region provided */ + public Path getDaughterRegionDir(HRegionInfo daughter) { + return FsLayout.getRegionDir(this.tableDir, daughter); + } + // =========================================================================== // Temp Helpers // =========================================================================== @@ -242,7 +262,27 @@ public class HRegionFileSystem { return ServerRegionReplicaUtil.getStoreFileInfo(conf, fs, regionInfo, regionInfoForFs, familyName, new Path(familyDir, fileName)); } + + void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount, HRegionInfo daughter) + throws IOException { + Path splitsDir = getSplitsDir(daughter); + if (expectedReferenceFileCount != 0 && + expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(getFileSystem(), + splitsDir)) { + throw new IOException("Failing split. Expected reference file count isn't equal."); + } + } + void assertReferenceFileCountOfDaughterDir(int expectedReferenceFileCount, HRegionInfo daughter) + throws IOException { + Path daughterRegionDir = FsLayout.getRegionDir(getTableDir(), daughter); + if (expectedReferenceFileCount != 0 && + expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(getFileSystem(), + daughterRegionDir)) { + throw new IOException("Failing split. 
Expected reference file count isn't equal."); + } + } + /** * Returns true if the specified family has reference files * @param familyName Column Family Name * @return true if family contains reference files @@ -308,7 +348,7 @@ public class HRegionFileSystem { * @param suffix extra information to append to the generated name * @return Unique file name */ - private static String generateUniqueName(final String suffix) { + protected static String generateUniqueName(final String suffix) { String name = UUID.randomUUID().toString().replaceAll("-", ""); if (suffix != null) name += suffix; return name; @@ -365,7 +405,7 @@ * @return The new {@link Path} of the committed file * @throws IOException */ - private Path commitStoreFile(final String familyName, final Path buildPath, + protected Path commitStoreFile(final String familyName, final Path buildPath, final long seqNum, final boolean generateNewName) throws IOException { Path storeDir = getStoreDir(familyName); if(!fs.exists(storeDir) && !createDir(storeDir)) @@ -496,7 +536,7 @@ FileStatus[] daughters = FSUtils.listStatus(fs, splitdir, new FSUtils.DirFilter(fs)); if (daughters != null) { for (FileStatus daughter: daughters) { - Path daughterDir = new Path(getTableDir(), daughter.getPath().getName()); + Path daughterDir = FsLayout.getRegionDir(getTableDir(), daughter.getPath().getName()); if (fs.exists(daughterDir) && !deleteDir(daughterDir)) { throw new IOException("Failed delete of " + daughterDir); } @@ -512,7 +552,7 @@ * @throws IOException */ void cleanupDaughterRegion(final HRegionInfo regionInfo) throws IOException { - Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName()); + Path regionDir = FsLayout.getRegionDir(this.tableDir, regionInfo); if (this.fs.exists(regionDir) && !deleteDir(regionDir)) { throw new IOException("Failed delete of " + regionDir); } @@ -527,8 +567,13 @@ */ Path commitDaughterRegion(final HRegionInfo regionInfo) throws IOException { - Path regionDir = new Path(this.tableDir, regionInfo.getEncodedName()); + Path regionDir = FsLayout.getRegionDir(this.tableDir, regionInfo); Path daughterTmpDir = this.getSplitsDir(regionInfo); + + // Example (hierarchical layout): a daughter staged under its parent, e.g. + // /table/bucket2/parent/.splits/daughter/.regioninfo + // is moved on commit to its final directory under its own bucket, e.g. + // /table/bucket1/daughter if (fs.exists(daughterTmpDir)) { @@ -536,15 +581,30 @@ Path regionInfoFile = new Path(daughterTmpDir, REGION_INFO_FILE); byte[] regionInfoContent = getRegionInfoFileContent(regionInfo); writeRegionInfoFileContent(conf, fs, regionInfoFile, regionInfoContent); - - // Move the daughter temp dir to the table dir - if (!rename(daughterTmpDir, regionDir)) { - throw new IOException("Unable to rename " + daughterTmpDir + " to " + regionDir); - } + + // Move the daughter region dir to its final place + moveNewRegionFromTmpDirToRegionDir(daughterTmpDir, regionDir); } return regionDir; } + + /** + * Finalize the creation of a new region by moving it from a temporary staging + * directory to its final region directory in the table directory. + * + * Example: Moving /table/parent/.splits/daughter to /table/daughter for a new + * daughter region created from a region split. + * + * @param source temporary staging directory + * @param dest final region directory + * @throws IOException + */ + void moveNewRegionFromTmpDirToRegionDir(Path source, Path dest) throws IOException { + if (!rename(source, dest)) { + throw new IOException("Unable to rename " + source + " to " + dest); + } + } /** * Create
the region splits directory. @@ -623,7 +683,11 @@ public class HRegionFileSystem { String parentRegionName = regionInfoForFs.getEncodedName(); // Write reference with same file id only with the other region name as // suffix and into the new region location (under same family). - Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName); + return createReferenceFile(r, f, parentRegionName, splitDir); + } + + Path createReferenceFile(Reference r, StoreFile f, String originalRegionName, Path targetDir) throws IOException { + Path p = new Path(targetDir, f.getPath().getName() + "." + originalRegionName); return r.write(fs, p); } @@ -652,7 +716,7 @@ public class HRegionFileSystem { * @throws IOException */ void cleanupMergedRegion(final HRegionInfo mergedRegion) throws IOException { - Path regionDir = new Path(this.tableDir, mergedRegion.getEncodedName()); + Path regionDir = FsLayout.getRegionDir(this.tableDir, mergedRegion); if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) { throw new IOException("Failed delete of " + regionDir); } @@ -701,9 +765,7 @@ public class HRegionFileSystem { String mergingRegionName = regionInfoForFs.getEncodedName(); // Write reference with same file id only with the other region name as // suffix and into the new region location (under same family). - Path p = new Path(referenceDir, f.getPath().getName() + "." - + mergingRegionName); - return r.write(fs, p); + return createReferenceFile(r, f, mergingRegionName, referenceDir); } /** @@ -713,14 +775,11 @@ public class HRegionFileSystem { * @throws IOException */ void commitMergedRegion(final HRegionInfo mergedRegionInfo) throws IOException { - Path regionDir = new Path(this.tableDir, mergedRegionInfo.getEncodedName()); + Path regionDir = FsLayout.getRegionDir(this.tableDir, mergedRegionInfo); Path mergedRegionTmpDir = this.getMergesDir(mergedRegionInfo); // Move the tmp dir in the expected location if (mergedRegionTmpDir != null && fs.exists(mergedRegionTmpDir)) { - if (!fs.rename(mergedRegionTmpDir, regionDir)) { - throw new IOException("Unable to rename " + mergedRegionTmpDir + " to " - + regionDir); - } + moveNewRegionFromTmpDirToRegionDir(mergedRegionTmpDir, regionDir); } } @@ -741,7 +800,7 @@ public class HRegionFileSystem { * @return Content of the file we write out to the filesystem under a region * @throws IOException */ - private static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException { + protected static byte[] getRegionInfoFileContent(final HRegionInfo hri) throws IOException { return hri.toDelimitedByteArray(); } @@ -765,7 +824,7 @@ public class HRegionFileSystem { /** * Write the .regioninfo file on-disk. */ - private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs, + protected static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs, final Path regionInfoFile, final byte[] content) throws IOException { // First check to get the permissions FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); @@ -816,7 +875,7 @@ public class HRegionFileSystem { * Write out an info file under the region directory. Useful recovering mangled regions. * @param useTempDir indicate whether or not using the region .tmp dir for a safer file creation. 
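For context, a tiny sketch of the naming convention the consolidated createReferenceFile() above relies on: a reference keeps the source hfile's name and appends the originating region's encoded name, which StoreFileInfo.REF_NAME_PATTERN later parses back apart. The directory, hfile name, and region name below are invented:

import org.apache.hadoop.fs.Path;

public class ReferenceNameSketch {
  // "<hfile>.<originalRegionName>" written into the target family directory.
  static Path referencePath(Path targetDir, String hfileName, String originalRegionName) {
    return new Path(targetDir, hfileName + "." + originalRegionName);
  }

  public static void main(String[] args) {
    Path cfDir = new Path("/hbase/data/default/t1/daughter/.splits/cf");
    System.out.println(referencePath(cfDir, "0f5a6b", "9d36fe1e51309fbd84a97f464fd62fbf"));
  }
}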
*/ - private void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException { + protected void writeRegionInfoOnFilesystem(boolean useTempDir) throws IOException { byte[] content = getRegionInfoFileContent(regionInfoForFs); writeRegionInfoOnFilesystem(content, useTempDir); } @@ -826,7 +885,7 @@ * @param regionInfoContent serialized version of the {@link HRegionInfo} * @param useTempDir indicate whether or not using the region .tmp dir for a safer file creation. */ - private void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, + protected void writeRegionInfoOnFilesystem(final byte[] regionInfoContent, final boolean useTempDir) throws IOException { Path regionInfoFile = new Path(getRegionDir(), REGION_INFO_FILE); if (useTempDir) { @@ -865,11 +924,13 @@ * @param fs {@link FileSystem} from which to add the region * @param tableDir {@link Path} to where the table is being stored * @param regionInfo {@link HRegionInfo} for region to be added * @throws IOException if the region creation fails due to a FileSystem exception. */ - public static HRegionFileSystem createRegionOnFileSystem(final Configuration conf, - final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException { - HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo); + public static HRegionFileSystem createRegionOnFileSystem( + final Configuration conf, final FileSystem fs, final Path tableDir, + final HRegionInfo regionInfo) + throws IOException { + HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo); Path regionDir = regionFs.getRegionDir(); if (fs.exists(regionDir)) { @@ -887,6 +949,43 @@ regionFs.writeRegionInfoOnFilesystem(false); return regionFs; } + + /** + * Call this only if you don't have the HRegionInfo in memory; + * this method will load it from disk. + * + * @param conf the {@link Configuration} to use + * @param fs {@link FileSystem} that contains the region + * @param tableDir {@link Path} to where the table is being stored + * @param encodedRegionName encoded name of the region to open + * @param readOnly true if the region should be opened read-only + * @return an HRegionFileSystem view of the on-disk region + * @throws IOException if the region does not exist on disk + */ + public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf, + final FileSystem fs, final Path tableDir, final String encodedRegionName, boolean readOnly) + throws IOException { + Path regionDir = FsLayout.getRegionDir(tableDir, encodedRegionName); + HRegionInfo hri = loadRegionInfoFileContent(fs, regionDir); + HRegionFileSystem regionFs = create(conf, fs, tableDir, hri); + + if (!regionFs.existsOnDisk()) { + LOG.warn("Trying to open a region that does not exist on disk: " + regionDir); + throw new IOException("The specified region does not exist on disk: " + regionDir); + } + + if (!readOnly) { + // Cleanup temporary directories + regionFs.cleanupTempDir(); + regionFs.cleanupSplitsDir(); + regionFs.cleanupMergesDir(); + + // if it doesn't exist, write the HRI to a file, in case we need to recover hbase:meta + regionFs.checkRegionInfoOnFilesystem(); + } + + return regionFs; + } /** * Open Region from file-system.
@@ -900,10 +999,10 @@ public class HRegionFileSystem { public static HRegionFileSystem openRegionFromFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, boolean readOnly) throws IOException { - HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo); + HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo); Path regionDir = regionFs.getRegionDir(); - if (!fs.exists(regionDir)) { + if (!regionFs.existsOnDisk()) { LOG.warn("Trying to open a region that do not exists on disk: " + regionDir); throw new IOException("The specified region do not exists on disk: " + regionDir); } @@ -920,7 +1019,32 @@ public class HRegionFileSystem { return regionFs; } - + + /** + * Does the region directory for this HRFS instance exist on disk + * @return true if the region directory exists + * @throws IOException + */ + public boolean existsOnDisk() throws IOException { + Path regionDir = getRegionDir(); + return fs.exists(regionDir); + } + + /** + * Delete the region directory if exists. + * @param conf + * @param hri + * @return True if deleted the region directory. + * @throws IOException + */ + public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri) + throws IOException { + Path rootDir = FSUtils.getRootDir(conf); + FileSystem fs = rootDir.getFileSystem(conf); + return FSUtils.deleteDirectory(fs, + FsLayout.getRegionDir(FSUtils.getTableDir(rootDir, hri.getTable()), hri.getEncodedName())); + } + /** * Remove the region from the table directory, archiving the region's hfiles. * @param conf the {@link Configuration} to use @@ -929,9 +1053,9 @@ public class HRegionFileSystem { * @param regionInfo {@link HRegionInfo} for region to be deleted * @throws IOException if the request cannot be completed */ - public static void deleteRegionFromFileSystem(final Configuration conf, + public static void deleteAndArchiveRegionFromFileSystem(final Configuration conf, final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException { - HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo); + HRegionFileSystem regionFs = create(conf, fs, tableDir, regionInfo); Path regionDir = regionFs.getRegionDir(); if (!fs.exists(regionDir)) { @@ -1037,7 +1161,7 @@ public class HRegionFileSystem { /** * sleeping logic; handles the interrupt exception. */ - private void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException { + protected void sleepBeforeRetry(String msg, int sleepMultiplier) throws InterruptedException { sleepBeforeRetry(msg, sleepMultiplier, baseSleepBeforeRetries, hdfsClientRetriesNumber); } @@ -1051,7 +1175,7 @@ public class HRegionFileSystem { * whether the directory exists or not, and returns true if it exists. * @throws IOException */ - private static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir) + protected static boolean createDirOnFileSystem(FileSystem fs, Configuration conf, Path dir) throws IOException { int i = 0; IOException lastIOE = null; @@ -1080,7 +1204,7 @@ public class HRegionFileSystem { * sleeping logic for static methods; handles the interrupt exception. Keeping a static version * for this to avoid re-looking for the integer values. 
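Tying the construction story together, a sketch of how a caller now obtains a region view (only create() and the FsLayout factory indirection are from this patch; the configuration, table, and region below are invented):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.util.FSUtils;

public class RegionFsFactorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);
    TableName tableName = TableName.valueOf("t1"); // invented table
    HRegionInfo hri = new HRegionInfo(tableName);
    // Under StandardHBaseFsLayout this returns a plain HRegionFileSystem;
    // under HierarchicalFsLayout, a HierarchicalHRegionFileSystem.
    HRegionFileSystem regionFs =
        HRegionFileSystem.create(conf, fs, FSUtils.getTableDir(rootDir, tableName), hri);
    System.out.println(regionFs.getRegionDir());
  }
}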
*/ - private static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries, + protected static void sleepBeforeRetry(String msg, int sleepMultiplier, int baseSleepBeforeRetries, int hdfsClientRetriesNumber) throws InterruptedException { if (sleepMultiplier > hdfsClientRetriesNumber) { LOG.debug(msg + ", retries exhausted"); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java new file mode 100644 index 0000000..d278624 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystemFactory.java @@ -0,0 +1,30 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; + +public class HRegionFileSystemFactory { + public HRegionFileSystem create(final Configuration conf, final FileSystem fs, final Path tableDir, + final HRegionInfo regionInfo) { + return new HRegionFileSystem(conf, fs, tableDir, regionInfo); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 33d4e1e..85eac25 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.conf.ConfigurationManager; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Cipher; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -475,7 +476,8 @@ public class HStore implements Store { @Deprecated public static Path getStoreHomedir(final Path tabledir, final String encodedName, final byte[] family) { - return new Path(tabledir, new Path(encodedName, Bytes.toString(family))); + return new Path(FsLayout.getRegionDir(tabledir, encodedName), + Bytes.toString(family)); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java new file mode 100644 index 0000000..5378a69 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystem.java @@ -0,0 +1,48 
@@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.fs.layout.StandardHBaseFsLayout; + +import com.google.common.annotations.VisibleForTesting; + +public class HierarchicalHRegionFileSystem extends HRegionFileSystem { + protected HierarchicalHRegionFileSystem(Configuration conf, FileSystem fs, Path tableDir, + HRegionInfo regionInfo) { + super(conf, fs, tableDir, regionInfo); + } + + @Override + void moveNewRegionFromTmpDirToRegionDir(Path source, Path dest) throws IOException { + fs.mkdirs(dest.getParent()); + super.moveNewRegionFromTmpDirToRegionDir(source, dest); + } + + // This will probably never be used for real; it exists so tests can compare + // humongous vs. regular region dir functionality + @VisibleForTesting + public Path getStandardHBaseRegionDir() { + return StandardHBaseFsLayout.get().getRegionDir(tableDir, regionInfoForFs.getEncodedName()); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java new file mode 100644 index 0000000..fbca254 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HierarchicalHRegionFileSystemFactory.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; + +public class HierarchicalHRegionFileSystemFactory extends HRegionFileSystemFactory { + @Override + public HRegionFileSystem create(Configuration conf, FileSystem fs, Path tableDir, + HRegionInfo regionInfo) { + return new HierarchicalHRegionFileSystem(conf, fs, tableDir, regionInfo); + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java index 95be873..a7f8495 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; @@ -359,35 +358,35 @@ public class SplitTransactionImpl implements SplitTransaction { transition(SplitTransactionPhase.STARTED_REGION_A_CREATION); - assertReferenceFileCount(expectedReferences.getFirst(), - this.parent.getRegionFileSystem().getSplitsDir(this.hri_a)); + assertReferenceFileCountOfSplitsDir(expectedReferences.getFirst(), this.hri_a); HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a); - assertReferenceFileCount(expectedReferences.getFirst(), - new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_a.getEncodedName())); + assertReferenceFileCountOfDaughterDir(expectedReferences.getFirst(), this.hri_a); // Ditto transition(SplitTransactionPhase.STARTED_REGION_B_CREATION); - assertReferenceFileCount(expectedReferences.getSecond(), - this.parent.getRegionFileSystem().getSplitsDir(this.hri_b)); + assertReferenceFileCountOfSplitsDir(expectedReferences.getSecond(), this.hri_b); HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b); - assertReferenceFileCount(expectedReferences.getSecond(), - new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_b.getEncodedName())); + assertReferenceFileCountOfDaughterDir(expectedReferences.getSecond(), this.hri_b); return new PairOfSameType(a, b); } - + @VisibleForTesting - void assertReferenceFileCount(int expectedReferenceFileCount, Path dir) + void assertReferenceFileCountOfSplitsDir(int expectedReferenceFileCount, HRegionInfo daughter) throws IOException { - if (expectedReferenceFileCount != 0 && - expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(parent.getFilesystem(), - dir)) { - throw new IOException("Failing split. Expected reference file count isn't equal."); - } + this.parent.getRegionFileSystem().assertReferenceFileCountOfSplitsDir( + expectedReferenceFileCount, daughter); } + @VisibleForTesting + void assertReferenceFileCountOfDaughterDir(int expectedReferenceFileCount, HRegionInfo daughter) + throws IOException { + this.parent.getRegionFileSystem().assertReferenceFileCountOfDaughterDir( + expectedReferenceFileCount, daughter); + } + /** * Perform time consuming opening of the daughter regions. 
* @param server Hosting server instance. Can be null when testing diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 6516a3e..c5ef7fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -27,6 +27,7 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -63,7 +64,7 @@ public class StoreFileInfo { * Group 1, hfile/hfilelink pattern, is this file's id. * Group 2 '(.+)' is the reference's parent region name. */ - private static final Pattern REF_NAME_PATTERN = + public static final Pattern REF_NAME_PATTERN = Pattern.compile(String.format("^(%s|%s)\\.(.+)$", HFILE_NAME_REGEX, HFileLink.LINK_NAME_REGEX)); @@ -387,36 +388,38 @@ public class StoreFileInfo { Matcher m = REF_NAME_PATTERN.matcher(name); return m.matches() && m.groupCount() > 1; } - + /* * Return path to the file referred to by a Reference. Presumes a directory * hierarchy of ${hbase.rootdir}/data/${namespace}/tablename/regionname/familyname. + * Unless the table is a humongous table in which case the hierarchy is + * ${hbase.rootdir}/data/${namespace}/tablename/bucket/regionname/familyname. + * * @param p Path to a Reference file. * @return Calculated path to parent region file. * @throws IllegalArgumentException when path regex fails to match. */ - public static Path getReferredToFile(final Path p) { - Matcher m = REF_NAME_PATTERN.matcher(p.getName()); + public static Path getReferredToFile(Path p) { + Matcher m = StoreFileInfo.REF_NAME_PATTERN.matcher(p.getName()); if (m == null || !m.matches()) { LOG.warn("Failed match of store file name " + p.toString()); throw new IllegalArgumentException("Failed match of store file name " + p.toString()); } - + // Other region name is suffix on the passed Reference file name String otherRegion = m.group(2); // Tabledir is up two directories from where Reference was written. - Path tableDir = p.getParent().getParent().getParent(); + Path regionDir = p.getParent().getParent(); + Path tableDir = FsLayout.getTableDirFromRegionDir(regionDir); String nameStrippedOfSuffix = m.group(1); if (LOG.isDebugEnabled()) { LOG.debug("reference '" + p + "' to region=" + otherRegion + " hfile=" + nameStrippedOfSuffix); } - - // Build up new path with the referenced region in place of our current - // region in the reference path. Also strip regionname suffix from name. 
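Put differently, a sketch of the path math with the layout in the loop (names invented; getTableDirFromRegionDir and getRegionDir are the FsLayout calls used in this hunk):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.fs.layout.FsLayout;

public class ReferredToSketch {
  // Rebuild the referred-to file's path from a Reference file's location. This
  // works for both layouts because the table dir is recovered via the layout
  // instead of a fixed number of getParent() hops.
  static Path referredTo(Path referenceFile, String parentRegionName, String hfileName) {
    Path familyDir = referenceFile.getParent();
    Path regionDir = familyDir.getParent();
    Path tableDir = FsLayout.getTableDirFromRegionDir(regionDir);
    return new Path(
        new Path(FsLayout.getRegionDir(tableDir, parentRegionName), familyDir.getName()),
        hfileName);
  }
}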
- return new Path(new Path(new Path(tableDir, otherRegion), - p.getParent().getName()), nameStrippedOfSuffix); + + return new Path(new Path(FsLayout.getRegionDir(tableDir, otherRegion), p.getParent() + .getName()), nameStrippedOfSuffix); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index 5021c74..3baeeb2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -35,6 +35,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FSDataInputStream; @@ -205,8 +206,11 @@ public class ExportSnapshot extends Configured implements Tool { TableName table =HFileLink.getReferencedTableName(inputPath.getName()); String region = HFileLink.getReferencedRegionName(inputPath.getName()); String hfile = HFileLink.getReferencedHFileName(inputPath.getName()); - path = new Path(FSUtils.getTableDir(new Path("./"), table), - new Path(region, new Path(family, hfile))); + // TODO: Currently assumes target cluster's layout is same as source cluster layout + // Add another config option? + Path tableDir = FSUtils.getTableDir(new Path("./"), table); + Path regionDir = FsLayout.getRegionDir(tableDir, region); + path = new Path(regionDir, new Path(family, hfile)); break; case WAL: Path oldLogsDir = new Path(outputRoot, HConstants.HREGION_OLDLOGDIR_NAME); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 441dbbf..e4b29ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.backup.HFileArchiver; import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.monitoring.MonitoredTask; @@ -399,7 +400,7 @@ public class RestoreSnapshotHelper { Map> snapshotFiles = getRegionHFileReferences(regionManifest); - Path regionDir = new Path(tableDir, regionInfo.getEncodedName()); + Path regionDir = FsLayout.getRegionDir(tableDir, regionInfo); String tableName = tableDesc.getTableName().getNameAsString(); // Restore families present in the table @@ -531,7 +532,7 @@ public class RestoreSnapshotHelper { */ private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo, final SnapshotRegionManifest manifest) throws IOException { - final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName()); + final Path regionDir = FsLayout.getRegionDir(tableDir, region.getRegionInfo()); final String tableName = tableDesc.getTableName().getNameAsString(); for (SnapshotRegionManifest.FamilyFiles familyFiles: 
manifest.getFamilyFilesList()) { Path familyDir = new Path(regionDir, familyFiles.getFamilyName().toStringUtf8()); @@ -588,12 +589,12 @@ public class RestoreSnapshotHelper { final SnapshotRegionManifest.StoreFile storeFile) throws IOException { String hfileName = storeFile.getName(); - // Extract the referred information (hfile name and parent region) - Path refPath = StoreFileInfo.getReferredToFile(new Path(new Path(new Path( - snapshotTable.getNameAsString(), regionInfo.getEncodedName()), familyDir.getName()), - hfileName)); - String snapshotRegionName = refPath.getParent().getParent().getName(); - String fileName = refPath.getName(); + Path referenceFile = new Path(new Path(FsLayout.getRegionDir(new Path( + snapshotTable.getNameAsString()), regionInfo), familyDir.getName()), hfileName); + Path referredToFile = StoreFileInfo.getReferredToFile(referenceFile); + + String snapshotRegionName = referredToFile.getParent().getParent().getName(); + String fileName = referredToFile.getName(); // The new reference should have the cloned region name as parent, if it is a clone. String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName))); @@ -619,8 +620,7 @@ public class RestoreSnapshotHelper { if (linkPath != null) { in = HFileLink.buildFromHFileLinkPattern(conf, linkPath).open(fs); } else { - linkPath = new Path(new Path(HRegion.getRegionDir(snapshotManifest.getSnapshotDir(), - regionInfo.getEncodedName()), familyDir.getName()), hfileName); + linkPath = FsLayout.makeHFileLinkPath(snapshotManifest, regionInfo, familyDir.getName(), hfileName); in = fs.open(linkPath); } OutputStream out = fs.create(outPath); @@ -651,8 +651,9 @@ public class RestoreSnapshotHelper { */ public HRegionInfo cloneRegionInfo(final HRegionInfo snapshotRegionInfo) { HRegionInfo regionInfo = new HRegionInfo(tableDesc.getTableName(), - snapshotRegionInfo.getStartKey(), snapshotRegionInfo.getEndKey(), - snapshotRegionInfo.isSplit(), snapshotRegionInfo.getRegionId()); + snapshotRegionInfo.getStartKey(), + snapshotRegionInfo.getEndKey(), snapshotRegionInfo.isSplit(), + snapshotRegionInfo.getRegionId()); regionInfo.setOffline(snapshotRegionInfo.isOffline()); return regionInfo; } @@ -662,7 +663,7 @@ public class RestoreSnapshotHelper { */ private List getTableRegions() throws IOException { LOG.debug("get table regions: " + tableDir); - FileStatus[] regionDirs = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); + List regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regionDirs == null) return null; List regions = new LinkedList(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java index 137acf3..ad7c93a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV1.java @@ -31,6 +31,7 @@ import java.util.concurrent.ExecutorCompletionService; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -114,7 +115,7 @@ public class SnapshotManifestV1 { static List loadRegionManifests(final Configuration conf, final Executor 
executor,final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException { - FileStatus[] regions = FSUtils.listStatus(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); + List<FileStatus> regions = FsLayout.getRegionDirFileStats(fs, snapshotDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { LOG.info("No regions under directory:" + snapshotDir); return null; @@ -133,9 +134,9 @@ } ArrayList<SnapshotRegionManifest> regionsManifest = - new ArrayList<SnapshotRegionManifest>(regions.length); + new ArrayList<SnapshotRegionManifest>(regions.size()); try { - for (int i = 0; i < regions.length; ++i) { + for (int i = 0; i < regions.size(); ++i) { regionsManifest.add(completionService.take().get()); } } catch (InterruptedException e) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java index dccbeb5..b0519fd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifestV2.java @@ -118,6 +118,8 @@ public class SnapshotManifestV2 { static List<SnapshotRegionManifest> loadRegionManifests(final Configuration conf, final Executor executor,final FileSystem fs, final Path snapshotDir, final SnapshotDescription desc) throws IOException { + // TODO: Not sure if there's anything I need to do here. A path change was made + // in SnapshotManifestV1, but nothing seems to be needed here. FileStatus[] manifestFiles = FSUtils.listStatus(fs, snapshotDir, new PathFilter() { @Override public boolean accept(Path path) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index 6d10351..ce51e27 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -64,13 +64,14 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.fs.HFileSystem; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.master.HMaster; +import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.regionserver.StoreFileInfo; import org.apache.hadoop.hbase.security.AccessDeniedException; import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.FSProtos; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSHedgedReadMetrics; import org.apache.hadoop.hdfs.DistributedFileSystem; @@ -261,21 +262,6 @@ } /** - * Delete the region directory if exists. - * @param conf - * @param hri - * @return True if deleted the region directory. - * @throws IOException - */ - public static boolean deleteRegionDir(final Configuration conf, final HRegionInfo hri) - throws IOException { - Path rootDir = getRootDir(conf); - FileSystem fs = rootDir.getFileSystem(conf); - return deleteDirectory(fs, - new Path(getTableDir(rootDir, hri.getTable()), hri.getEncodedName())); - } - - /** * Return the number of bytes that large input files should be optimally * be split into to minimize i/o time.
* @@ -660,7 +646,8 @@ public abstract class FSUtils { throws IOException, DeserializationException { String version = getVersion(fs, rootdir); if (version == null) { - if (!metaRegionExists(fs, rootdir)) { + // TODO: Doesn't feel like a utility should have a dependency like this + if (!MasterFileSystem.metaRegionExists(fs, rootdir)) { // rootDir is empty (no version file and no root region) // just create new version file (HBASE-1195) setVersion(fs, rootdir, wait, retries); @@ -1017,22 +1004,6 @@ public abstract class FSUtils { } /** - * Checks if meta region exists - * - * @param fs file system - * @param rootdir root directory of HBase installation - * @return true if exists - * @throws IOException e - */ - @SuppressWarnings("deprecation") - public static boolean metaRegionExists(FileSystem fs, Path rootdir) - throws IOException { - Path metaRegionDir = - HRegion.getRegionDir(rootdir, HRegionInfo.FIRST_META_REGIONINFO); - return fs.exists(metaRegionDir); - } - - /** * Compute HDFS blocks distribution of a given file, or a portion of the file * @param fs file system * @param status file status of the file @@ -1070,10 +1041,10 @@ public abstract class FSUtils { final Path hbaseRootDir) throws IOException { List tableDirs = getTableDirs(fs, hbaseRootDir); - PathFilter regionFilter = new RegionDirFilter(fs); + RegionDirFilter regionFilter = new RegionDirFilter(fs); PathFilter familyFilter = new FamilyDirFilter(fs); - for (Path d : tableDirs) { - FileStatus[] regionDirs = fs.listStatus(d, regionFilter); + for (Path tableDir : tableDirs) { + List regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir, regionFilter); for (FileStatus regionDir : regionDirs) { Path dd = regionDir.getPath(); // Else its a region name. Now look in region for families. @@ -1143,13 +1114,13 @@ public abstract class FSUtils { Map frags = new HashMap(); int cfCountTotal = 0; int cfFragTotal = 0; - PathFilter regionFilter = new RegionDirFilter(fs); + RegionDirFilter regionFilter = new RegionDirFilter(fs); PathFilter familyFilter = new FamilyDirFilter(fs); List tableDirs = getTableDirs(fs, hbaseRootDir); - for (Path d : tableDirs) { + for (Path tableDir : tableDirs) { int cfCount = 0; int cfFrag = 0; - FileStatus[] regionDirs = fs.listStatus(d, regionFilter); + List regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir, regionFilter); for (FileStatus regionDir : regionDirs) { Path dd = regionDir.getPath(); // else its a region name, now look in region for families @@ -1167,7 +1138,7 @@ public abstract class FSUtils { } } // compute percentage per table and store in result list - frags.put(FSUtils.getTableName(d).getNameAsString(), + frags.put(FSUtils.getTableName(tableDir).getNameAsString(), cfCount == 0? 0: Math.round((float) cfFrag / cfCount * 100)); } // set overall percentage for all tables @@ -1433,25 +1404,6 @@ public abstract class FSUtils { } /** - * Given a particular table dir, return all the regiondirs inside it, excluding files such as - * .tableinfo - * @param fs A file system for the Path - * @param tableDir Path to a specific table directory <hbase.rootdir>/<tabledir> - * @return List of paths to valid region directories in table dir. - * @throws IOException - */ - public static List getRegionDirs(final FileSystem fs, final Path tableDir) throws IOException { - // assumes we are in a table dir. 
- FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs)); - List regionDirs = new ArrayList(rds.length); - for (FileStatus rdfs: rds) { - Path rdPath = rdfs.getPath(); - regionDirs.add(rdPath); - } - return regionDirs; - } - - /** * Filter for all dirs that are legal column family names. This is generally used for colfam * dirs <hbase.rootdir>/<tabledir>/<regiondir>/<colfamdir>. */ @@ -1616,12 +1568,11 @@ public abstract class FSUtils { // Inside a table, there are compaction.dir directories to skip. Otherwise, all else // should be regions. PathFilter familyFilter = new FamilyDirFilter(fs); - FileStatus[] regionDirs = fs.listStatus(tableDir, new RegionDirFilter(fs)); - for (FileStatus regionDir : regionDirs) { + List regionDirs = FsLayout.getRegionDirPaths(fs, tableDir); + for (Path dd : regionDirs) { if (null != errors) { errors.progress(); } - Path dd = regionDir.getPath(); // else its a region name, now look in region for families FileStatus[] familyDirs = fs.listStatus(dd, familyFilter); for (FileStatus familyDir : familyDirs) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java index f0cc0c1..efd8124 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSVisitor.java @@ -19,11 +19,13 @@ package org.apache.hadoop.hbase.util; import java.io.IOException; +import java.util.List; import java.util.NavigableSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -71,7 +73,7 @@ public final class FSVisitor { */ public static void visitRegions(final FileSystem fs, final Path tableDir, final RegionVisitor visitor) throws IOException { - FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); + List regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { if (LOG.isTraceEnabled()) { LOG.trace("No regions under directory:" + tableDir); @@ -94,7 +96,7 @@ public final class FSVisitor { */ public static void visitTableStoreFiles(final FileSystem fs, final Path tableDir, final StoreFileVisitor visitor) throws IOException { - FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); + List regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { if (LOG.isTraceEnabled()) { LOG.trace("No regions under directory:" + tableDir); @@ -156,7 +158,7 @@ public final class FSVisitor { */ public static void visitTableRecoveredEdits(final FileSystem fs, final Path tableDir, final FSVisitor.RecoveredEditsVisitor visitor) throws IOException { - FileStatus[] regions = FSUtils.listStatus(fs, tableDir, new FSUtils.RegionDirFilter(fs)); + List regions = FsLayout.getRegionDirFileStats(fs, tableDir, new FSUtils.RegionDirFilter(fs)); if (regions == null) { if (LOG.isTraceEnabled()) { LOG.trace("No recoveredEdits regions under directory:" + tableDir); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index cc87f64..98ad22b 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -62,6 +62,7 @@ import com.google.common.collect.Multimap; import com.google.common.collect.Ordering; import com.google.common.collect.TreeMultimap; import com.google.protobuf.ServiceException; + import org.apache.commons.io.IOUtils; import org.apache.commons.lang.RandomStringUtils; import org.apache.commons.lang.StringUtils; @@ -109,6 +110,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.client.TableState; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.master.MasterFileSystem; @@ -754,13 +756,14 @@ public class HBaseFsck extends Configured implements Closeable { final RegionBoundariesInformation currentRegionBoundariesInformation = new RegionBoundariesInformation(); Path hbaseRoot = FSUtils.getRootDir(getConf()); + FileSystem fs = hbaseRoot.getFileSystem(getConf()); for (HRegionInfo regionInfo : regions) { Path tableDir = FSUtils.getTableDir(hbaseRoot, regionInfo.getTable()); currentRegionBoundariesInformation.regionName = regionInfo.getRegionName(); // For each region, get the start and stop key from the META and compare them to the // same information from the Stores. - Path path = new Path(tableDir, regionInfo.getEncodedName()); - FileSystem fs = path.getFileSystem(getConf()); + HRegionFileSystem hrfs = HRegionFileSystem.create(getConf(), fs, tableDir, regionInfo); + Path path = hrfs.getRegionDir(); FileStatus[] files = fs.listStatus(path); // For all the column families in this region... byte[] storeFirstKey = null; @@ -2276,8 +2279,8 @@ public class HBaseFsck extends Configured implements Closeable { LOG.warn(hri + " start and stop keys are in the range of " + region + ". The region might not be cleaned up from hdfs when region " + region + " split failed. Hence deleting from hdfs."); - HRegionFileSystem.deleteRegionFromFileSystem(getConf(), fs, - regionDir.getParent(), hri); + HRegionFileSystem.deleteAndArchiveRegionFromFileSystem(getConf(), fs, + FsLayout.getTableDirFromRegionDir(regionDir), hri); return; } } @@ -2804,8 +2807,8 @@ public class HBaseFsck extends Configured implements Closeable { + "region and regioninfo in HDFS to plug the hole.", getTableInfo()); HTableDescriptor htd = getTableInfo().getHTD(); // from curEndKey to EMPTY_START_ROW - HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), curEndKey, - HConstants.EMPTY_START_ROW); + HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), + curEndKey, HConstants.EMPTY_START_ROW); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("Table region end key was not empty. Created new empty region: " + newRegion @@ -2915,8 +2918,8 @@ public class HBaseFsck extends Configured implements Closeable { // create new empty container region. 
HTableDescriptor htd = getTableInfo().getHTD(); // from start key to end Key - HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), range.getFirst(), - range.getSecond()); + HRegionInfo newRegion = new HRegionInfo(htd.getTableName(), + range.getFirst(), range.getSecond()); HRegion region = HBaseFsckRepair.createHDFSRegionDir(conf, newRegion, htd); LOG.info("[" + thread + "] Created new empty container region: " + newRegion + " to contain regions: " + Joiner.on(",").join(overlap)); @@ -3601,7 +3604,7 @@ public class HBaseFsck extends Configured implements Closeable { } else if (this.hdfsEntry != null) { // we are only guaranteed to have a path and not an HRI for hdfsEntry, // so we get the name from the Path - Path tableDir = this.hdfsEntry.hdfsRegionDir.getParent(); + Path tableDir = FsLayout.getTableDirFromRegionDir(this.hdfsEntry.hdfsRegionDir); return FSUtils.getTableName(tableDir); } else { // return the info from the first online/deployed hri @@ -4038,7 +4041,11 @@ public class HBaseFsck extends Configured implements Closeable { public synchronized Void call() throws IOException { try { // level 2: //* - FileStatus[] regionDirs = fs.listStatus(tableDir.getPath()); + List regionDirs = FsLayout.getRegionDirFileStats(fs, tableDir.getPath(), + new FSUtils.RegionDirFilter(fs)); + if (regionDirs == null) { + return null; + } for (FileStatus regionDir : regionDirs) { errors.progress(); String encodedName = regionDir.getPath().getName(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java index 937e9b2..1df19bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java @@ -24,7 +24,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HStore; /** @@ -72,39 +71,6 @@ public class HFileArchiveUtil { } /** - * Get the archive directory for a given region under the specified table - * @param tableName the table name. Cannot be null. - * @param regiondir the path to the region directory. Cannot be null. - * @return {@link Path} to the directory to archive the given region, or null if it - * should not be archived - */ - public static Path getRegionArchiveDir(Path rootDir, - TableName tableName, - Path regiondir) { - // get the archive directory for a table - Path archiveDir = getTableArchivePath(rootDir, tableName); - - // then add on the region path under the archive - String encodedRegionName = regiondir.getName(); - return HRegion.getRegionDir(archiveDir, encodedRegionName); - } - - /** - * Get the archive directory for a given region under the specified table - * @param rootDir {@link Path} to the root directory where hbase files are stored (for building - * the archive path) - * @param tableName name of the table to archive. Cannot be null. 
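With the two getRegionArchiveDir() overloads deleted, callers presumably resolve archive locations through the layout instead; a hedged sketch of that replacement (getTableArchivePath remains in HFileArchiveUtil, while routing the region through FsLayout.getRegionDir is my assumption):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.fs.layout.FsLayout;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;

public class ArchiveDirSketch {
  // Resolve the table's archive dir, then let the layout place the region under
  // it (including any bucket level for humongous tables).
  static Path regionArchiveDir(Path rootDir, TableName tableName, String encodedRegionName) {
    Path archiveTableDir = HFileArchiveUtil.getTableArchivePath(rootDir, tableName);
    return FsLayout.getRegionDir(archiveTableDir, encodedRegionName);
  }
}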
- * @return {@link Path} to the directory to archive the given region, or null if it - * should not be archived - */ - public static Path getRegionArchiveDir(Path rootDir, - TableName tableName, String encodedRegionName) { - // get the archive directory for a table - Path archiveDir = getTableArchivePath(rootDir, tableName); - return HRegion.getRegionDir(archiveDir, encodedRegionName); - } - - /** * Get the path to the table archive directory based on the configured archive directory. *

* Get the path to the table's archive directory. diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java index 29ab24e..303ed60 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java @@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -217,8 +218,9 @@ * @throws IOException */ void checkTableDir(Path tableDir) throws IOException { - FileStatus[] rds = fs.listStatus(tableDir, new RegionDirFilter(fs)); - if (rds.length == 0 && !fs.exists(tableDir)) { + // TODO: Maybe a TableFileSystem?? + List<Path> rds = FsLayout.getRegionDirPaths(fs, tableDir); + if (rds.size() == 0 && !fs.exists(tableDir)) { // interestingly listStatus does not throw an exception if the path does not exist. LOG.warn("Table Directory " + tableDir + " does not exist. Likely due to concurrent delete. Skipping."); @@ -230,8 +232,7 @@ List<RegionDirChecker> rdcs = new ArrayList<RegionDirChecker>(); List<Future<Void>> rdFutures; - for (FileStatus rdFs : rds) { - Path rdDir = rdFs.getPath(); + for (Path rdDir : rds) { RegionDirChecker work = new RegionDirChecker(rdDir); rdcs.add(work); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 9a26a24..9d92fd3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hbase.client.TableState; import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager; import org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination; import org.apache.hadoop.hbase.exceptions.RegionOpeningException; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.master.SplitLogManager; import org.apache.hadoop.hbase.monitoring.MonitoredTask; @@ -512,21 +513,19 @@ * @return Path to file into which to dump split log edits. * @throws IOException */ - @SuppressWarnings("deprecation") static Path getRegionSplitEditsPath(final FileSystem fs, final Entry logEntry, final Path rootDir, boolean isCreate) throws IOException { Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTablename()); String encodedRegionName = Bytes.toString(logEntry.getKey().getEncodedRegionName()); - Path regiondir = HRegion.getRegionDir(tableDir, encodedRegionName); - Path dir = getRegionDirRecoveredEditsDir(regiondir); - + Path regiondir = FsLayout.getRegionDir(tableDir, encodedRegionName); if (!fs.exists(regiondir)) { - LOG.info("This region's directory doesn't exist: " - + regiondir.toString() + ". It is very likely that it was" + - " already split so it's safe to discard those edits."); - return null; + LOG.info("This region's directory doesn't exist."
+ + " It is very likely that it was" + + " already split so it's safe to discard those edits."); + return null; } + Path dir = getRegionDirRecoveredEditsDir(regiondir); if (fs.exists(dir) && fs.isFile(dir)) { Path tmp = new Path("/tmp"); if (!fs.exists(tmp)) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index e8b79a8..0cbd2f6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1316,7 +1316,25 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { return createTable(tableName, new byte[][] { family }, splitKeys); } + + /** + * Create a table with multiple regions. + * @param desc + * @param family + * @param numRegions + * @return An HTable instance for the created table. + * @throws IOException + */ + public HTable createMultiRegionTable(HTableDescriptor desc, byte[] family, int numRegions) + throws IOException { + if (numRegions < 3) throw new IOException("Must create at least 3 regions"); + byte[] startKey = Bytes.toBytes("aaaaa"); + byte[] endKey = Bytes.toBytes("zzzzz"); + byte[][] splitKeys = Bytes.split(startKey, endKey, numRegions - 3); + return createTable(desc, new byte[][] { family }, splitKeys, + new Configuration(getConfiguration())); + } /** * Create a table. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHumongousTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHumongousTable.java new file mode 100644 index 0000000..2c0fd6e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHumongousTable.java @@ -0,0 +1,184 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.client.Connection; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.fs.layout.FsLayout; +import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; +import org.apache.hadoop.hbase.regionserver.HierarchicalHRegionFileSystem; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category({MediumTests.class, MiscTests.class}) +public class TestHumongousTable { + protected static final Log LOG = LogFactory.getLog(TestHumongousTable.class); + protected final static int NUM_SLAVES_BASE = 4; + private static HBaseTestingUtility TEST_UTIL; + private static Configuration CONF; + protected static HBaseAdmin ADMIN; + protected static FileSystem FS; + + @BeforeClass + public static void setUp() throws Exception { + FsLayout.setLayoutForTesting(HierarchicalFsLayout.get()); + TEST_UTIL = new HBaseTestingUtility(); + CONF = TEST_UTIL.getConfiguration(); + TEST_UTIL.startMiniCluster(NUM_SLAVES_BASE); + ADMIN = TEST_UTIL.getHBaseAdmin(); + LOG.info("Done initializing cluster"); + } + + @AfterClass + public static void tearDown() throws Exception { + try { + TEST_UTIL.shutdownMiniCluster(); + } finally { + FsLayout.reset(); + } + } + + @Before + public void beforeMethod() throws IOException { + for (HTableDescriptor desc : ADMIN.listTables(".*")) { + ADMIN.disableTable(desc.getTableName()); + ADMIN.deleteTable(desc.getTableName()); + } + } + + @Test(timeout = 60000) + public void testCreateHumongousTable() throws IOException, InterruptedException { + // create a humongous table with splits + String tableNameStr = "testCreateHumongousTable"; + TableName tableName = TableName.valueOf(tableNameStr); + String familyName = "col"; + TableName testTable = TableName.valueOf(tableNameStr); + HTableDescriptor desc = new HTableDescriptor(testTable); + HColumnDescriptor family = new HColumnDescriptor(familyName); + desc.addFamily(family); + ADMIN.createTable(desc, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); + + // check tableDir and table descriptor + Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), testTable); + FS = TEST_UTIL.getTestFileSystem(); + assertTrue(FS.exists(tableDir)); + + // load table with rows and flush stores + Connection connection = TEST_UTIL.getConnection(); + Table table = connection.getTable(testTable); + int rowCount = TEST_UTIL.loadTable(table, Bytes.toBytes(familyName)); + ADMIN.flush(tableName); + assertEquals(TEST_UTIL.countRows(table), rowCount); + + verifyColumnFamilies(desc, testTable, familyName); + + // test 
alteration of humongous table too
+    String familyName2 = "col2";
+    HColumnDescriptor family2 = new HColumnDescriptor(familyName2);
+    ADMIN.addColumnFamily(tableName, family2);
+
+    // Wait for async add column to finish
+    Thread.sleep(5000);
+
+    TEST_UTIL.loadTable(table, Bytes.toBytes(familyName2));
+    ADMIN.flush(tableName);
+
+    verifyColumnFamilies(desc, testTable, familyName, familyName2);
+
+    // drop the test table
+    ADMIN.disableTable(testTable);
+    assertTrue(ADMIN.isTableDisabled(testTable));
+    ADMIN.deleteTable(testTable);
+    assertEquals(null, ADMIN.getTableRegions(testTable));
+    assertFalse(FS.exists(tableDir));
+  }
+
+  private void verifyColumnFamilies(HTableDescriptor desc, TableName testTable,
+      String... colFamNames) throws IOException {
+    Path tableDir = FSUtils.getTableDir(TEST_UTIL.getDefaultRootDirPath(), testTable);
+
+    List<HRegionInfo> tableRegions = ADMIN.getTableRegions(testTable);
+
+    // check region dirs and files on fs
+    for (HRegionInfo hri : tableRegions) {
+      // check region dir structure
+      HierarchicalHRegionFileSystem hrfs =
+          (HierarchicalHRegionFileSystem) HRegionFileSystem.openRegionFromFileSystem(
+              CONF, FS, tableDir, hri, true);
+
+      Path humongousRegionDir = hrfs.getRegionDir();
+      Path normalRegionDir = hrfs.getStandardHBaseRegionDir();
+
+      assertTrue(FS.exists(humongousRegionDir));
+      assertFalse(FS.exists(normalRegionDir));
+
+      String bucket = hri.getEncodedName().substring(
+          HRegionInfo.MD5_HEX_LENGTH - HRegionFileSystem.HUMONGOUS_DIR_NAME_SIZE);
+      assertEquals(bucket, humongousRegionDir.getParent().getName());
+
+      FileStatus[] statList = FS.listStatus(humongousRegionDir);
+      Set<String> contents = new HashSet<String>();
+
+      for (FileStatus stat : statList) {
+        contents.add(stat.getPath().getName());
+      }
+
+      LOG.debug("Contents of humongous region dir: " + contents);
+
+      assertTrue(contents.contains(HRegionFileSystem.REGION_INFO_FILE));
+      assertTrue(contents.contains(HConstants.HBASE_TEMP_DIRECTORY));
+      assertTrue(contents.contains(HConstants.RECOVERED_EDITS_DIR));
+
+      for (String colFam : colFamNames) {
+        assertTrue(contents.contains(colFam));
+
+        // familyDir has one store file
+        Path famPath = new Path(humongousRegionDir, colFam);
+        assertEquals(1, FS.listStatus(famPath).length);
+      }
+
+      assertEquals("Contents: " + contents + " and fam names: " + Arrays.toString(colFamNames),
+          3 + colFamNames.length, contents.size());
+    }
+  }
+}
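TestHumongousTable above checks the hierarchical layout end to end; the archiving, link, and cleaner tests that follow all lean on the same idiom for locating a region directory without hard-coding a layout. Condensed into one hedged sketch (the calls mirror the hunks below, but treat it as illustration rather than the definitive API):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
    import org.apache.hadoop.hbase.util.FSUtils;

    class RegionDirLookupSketch {
      // Resolve a region dir without hard-coding the layout.
      static Path regionDir(Configuration conf, Path rootDir, TableName table,
          String encodedName) throws IOException {
        FileSystem fs = rootDir.getFileSystem(conf);
        Path tableDir = FSUtils.getTableDir(rootDir, table);
        // Test-only HRegionInfo with a fixed 32-char encoded name.
        HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(table, encodedName);
        HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri);
        return hrfs.getRegionDir(); // the active layout decides bucketing here
      }
    }

Because the region file system is obtained through a factory that respects the active layout, the same test code can pass under either the standard or the hierarchical layout.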
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index e30d719..2021ebb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -35,13 +35,17 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -66,8 +70,9 @@ import org.junit.experimental.categories.Category;
 public class TestHFileArchiving {
   private static final Log LOG = LogFactory.getLog(TestHFileArchiving.class);
-  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final byte[] TEST_FAM = Bytes.toBytes("fam");
+  static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  static final byte[] TEST_FAM = Bytes.toBytes("fam");
+  static final byte[] TEST_FAM_2 = Bytes.toBytes("fam2");
 
   /**
    * Setup the config for the cluster
@@ -117,7 +122,7 @@ public class TestHFileArchiving {
   public void testRemovesRegionDirOnArchive() throws Exception {
     TableName TABLE_NAME = TableName.valueOf("testRemovesRegionDirOnArchive");
-    UTIL.createTable(TABLE_NAME, TEST_FAM);
+    createTable(TABLE_NAME);
 
     final Admin admin = UTIL.getHBaseAdmin();
 
@@ -136,8 +141,7 @@ public class TestHFileArchiving {
     FileSystem fs = UTIL.getTestFileSystem();
 
     // now attempt to depose the region
-    Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
-    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+    Path regionDir = region.getRegionFileSystem().getRegionDir();
 
     HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());
 
@@ -177,7 +181,7 @@ public class TestHFileArchiving {
   public void testDeleteRegionWithNoStoreFiles() throws Exception {
     TableName TABLE_NAME = TableName.valueOf("testDeleteRegionWithNoStoreFiles");
-    UTIL.createTable(TABLE_NAME, TEST_FAM);
+    createTable(TABLE_NAME);
 
     // get the current store files for the region
     List<Region> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
@@ -188,8 +192,7 @@ public class TestHFileArchiving {
     FileSystem fs = region.getRegionFileSystem().getFileSystem();
 
     // make sure there are some files in the regiondir
-    Path rootDir = FSUtils.getRootDir(fs.getConf());
-    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
+    Path regionDir = region.getRegionFileSystem().getRegionDir();
     FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
     Assert.assertNotNull("No files in the region directory", regionFiles);
     if (LOG.isDebugEnabled()) {
@@ -226,7 +229,7 @@ public class TestHFileArchiving {
   public void testArchiveOnTableDelete() throws Exception {
     TableName TABLE_NAME = TableName.valueOf("testArchiveOnTableDelete");
-    UTIL.createTable(TABLE_NAME, TEST_FAM);
+    createTable(TABLE_NAME);
 
     List<Region> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
     // make sure we only have 1 region serving this table
@@ -306,7 +309,7 @@ public class TestHFileArchiving {
   public void testArchiveOnTableFamilyDelete() throws Exception {
     TableName TABLE_NAME = TableName.valueOf("testArchiveOnTableFamilyDelete");
-    UTIL.createTable(TABLE_NAME, new byte[][] {TEST_FAM, Bytes.toBytes("fam2")});
+    createTwoFamilyTable(TABLE_NAME);
 
     List<Region> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
     // make sure we only have 1 region serving this table
@@ -360,11 +363,16 @@ public class TestHFileArchiving {
     Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
     FileSystem fs = UTIL.getTestFileSystem();
+    TableName tableName = TableName.valueOf("table");
 
     Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
-    Path regionDir = new Path(FSUtils.getTableDir(new Path("./"),
-        TableName.valueOf("table")), "abcdef");
+    Path 
tableDir = FSUtils.getTableDir(new Path("./"), tableName); + HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(tableName, "abcdefabcdefabcdefabcdefabcdef12"); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri); + Path regionDir = hrfs.getRegionDir(); Path familyDir = new Path(regionDir, "cf"); + Path absoluteTableDir = new Path(rootDir, tableDir); Path sourceRegionDir = new Path(rootDir, regionDir); fs.mkdirs(sourceRegionDir); @@ -387,7 +395,7 @@ public class TestHFileArchiving { try { // Try to archive the file HFileArchiver.archiveRegion(fs, rootDir, - sourceRegionDir.getParent(), sourceRegionDir); + absoluteTableDir, sourceRegionDir); // The archiver succeded, the file is no longer in the original location // but it's in the archive location. @@ -451,4 +459,21 @@ public class TestHFileArchiving { } return fileNames; } + + void createTable(TableName tn) throws Exception { + HTableDescriptor desc = makeDescriptor(tn); + desc.addFamily(new HColumnDescriptor(TEST_FAM)); + UTIL.createTable(desc, null); + } + + void createTwoFamilyTable(TableName tn) throws Exception { + HTableDescriptor desc = makeDescriptor(tn); + desc.addFamily(new HColumnDescriptor(TEST_FAM)); + desc.addFamily(new HColumnDescriptor(TEST_FAM_2)); + UTIL.createTable(desc, null); + } + + HTableDescriptor makeDescriptor(TableName tn) { + return new HTableDescriptor(tn); + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHumongousHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHumongousHFileArchiving.java new file mode 100644 index 0000000..e346808 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHumongousHFileArchiving.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.backup; + +import org.apache.hadoop.hbase.fs.layout.FsLayout; +import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.experimental.categories.Category; + +@Category({MediumTests.class, MiscTests.class}) +public class TestHumongousHFileArchiving extends TestHFileArchiving { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + FsLayout.setLayoutForTesting(HierarchicalFsLayout.get()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + FsLayout.reset(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java index 821d5c2..67c0d50 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java @@ -23,11 +23,15 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.util.HashSet; +import java.util.List; import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -35,6 +39,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.master.MasterFileSystem; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException; import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException; import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils; @@ -286,13 +291,13 @@ public class TestRestoreSnapshotFromClient { } private Set getFamiliesFromFS(final TableName tableName) throws IOException { + Connection connection = TEST_UTIL.getConnection(); MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(); + List regions = mfs.getRegionFileSystems(TEST_UTIL.getConfiguration(), + connection, tableName); Set families = new HashSet(); - Path tableDir = FSUtils.getTableDir(mfs.getRootDir(), tableName); - for (Path regionDir: FSUtils.getRegionDirs(mfs.getFileSystem(), tableDir)) { - for (Path familyDir: FSUtils.getFamilyDirs(mfs.getFileSystem(), regionDir)) { - families.add(familyDir.getName()); - } + for (HRegionFileSystem hrfs : regions) { + families.addAll(hrfs.getFamilies()); } return families; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/layout/TestFsLayout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/layout/TestFsLayout.java new file mode 100644 index 0000000..ee217bf --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/layout/TestFsLayout.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.fs.layout;
+
+import static org.junit.Assert.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MediumTests.class, MiscTests.class})
+public class TestFsLayout {
+  private static Configuration conf;
+  private static HBaseTestingUtility TEST_UTIL;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    conf = HBaseConfiguration.create();
+    TEST_UTIL = new HBaseTestingUtility(conf);
+    TEST_UTIL.startMiniCluster();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    try {
+      TEST_UTIL.shutdownMiniCluster();
+    } finally {
+      FsLayout.reset();
+    }
+  }
+
+  @Test
+  public void testLayoutInitialization() throws Exception {
+    FsLayout.reset();
+    assertNull(FsLayout.getRaw());
+
+    HierarchicalFsLayout hierarchicalLayout = HierarchicalFsLayout.get();
+    FsLayout.setLayoutForTesting(hierarchicalLayout);
+
+    assertTrue(FsLayout.getRaw() instanceof HierarchicalFsLayout);
+    assertTrue(FsLayout.get() instanceof HierarchicalFsLayout);
+
+    FsLayout.reset();
+    assertNull(FsLayout.getRaw());
+
+    DistributedFileSystem fs = TEST_UTIL.getDFSCluster().getFileSystem();
+    Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
+
+    FsLayout.writeLayoutFile(fs, rootDir, hierarchicalLayout, true);
+    assertNull(FsLayout.getRaw());
+    conf.setBoolean(FsLayout.FS_LAYOUT_DETECT, true);
+    FsLayout.initialize(conf);
+    assertNotNull(FsLayout.getRaw());
+    assertTrue(FsLayout.get() instanceof HierarchicalFsLayout);
+    assertTrue(FsLayout.getRaw() instanceof HierarchicalFsLayout);
+
+    FsLayout.reset();
+    FsLayout.deleteLayoutFile(fs, rootDir);
+    FsLayout.initialize(conf);
+    assertTrue(FsLayout.get() instanceof StandardHBaseFsLayout);
+    assertTrue(FsLayout.getRaw() instanceof StandardHBaseFsLayout);
+
+    FsLayout.reset();
+    conf.setBoolean(FsLayout.FS_LAYOUT_DETECT_STRICT, true);
+    try {
+      FsLayout.initialize(conf);
+      fail("Expected an IllegalStateException when strict detection finds no layout file");
+    } catch (IllegalStateException e) {
+      // Should be thrown by initialize
+    }
+
+    FsLayout.reset();
+    conf.setBoolean(FsLayout.FS_LAYOUT_DETECT, false);
+    conf.setBoolean(FsLayout.FS_LAYOUT_DETECT_STRICT, false);
+    FsLayout.writeLayoutFile(fs, rootDir, StandardHBaseFsLayout.get(), true);
+    conf.set(FsLayout.FS_LAYOUT_CHOICE, HierarchicalFsLayout.class.getName());
+    assertTrue(FsLayout.get() instanceof StandardHBaseFsLayout);
+  }
+}
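Read together, the assertions above pin down the initialization contract: an explicit setLayoutForTesting wins; with FS_LAYOUT_DETECT set, the layout file written by writeLayoutFile decides; a missing file falls back to the standard layout unless FS_LAYOUT_DETECT_STRICT is also set, in which case initialize throws IllegalStateException. The final block also shows that FS_LAYOUT_CHOICE alone does not switch the layout here; its exact effect is not pinned down by this test. A sketch of the inferred detection flow, where readLayoutFileOrNull is a hypothetical stand-in for FsLayout's internal read of the layout file, AFsLayout is assumed to be the abstract layout type, and the getBoolean defaults are assumptions:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.fs.layout.AFsLayout;
    import org.apache.hadoop.hbase.fs.layout.FsLayout;
    import org.apache.hadoop.hbase.fs.layout.StandardHBaseFsLayout;

    class FsLayoutInitSketch {
      // Inferred from the assertions above; not the actual FsLayout code.
      static AFsLayout resolve(Configuration conf, FileSystem fs, Path rootDir)
          throws IOException {
        if (conf.getBoolean(FsLayout.FS_LAYOUT_DETECT, false)) {
          AFsLayout detected = readLayoutFileOrNull(fs, rootDir);
          if (detected != null) {
            return detected; // layout file written by writeLayoutFile wins
          }
          if (conf.getBoolean(FsLayout.FS_LAYOUT_DETECT_STRICT, false)) {
            throw new IllegalStateException("No layout file found under " + rootDir);
          }
        }
        return StandardHBaseFsLayout.get(); // default in every other case exercised above
      }

      // Hypothetical stand-in: returns the layout named in the root dir's
      // layout file, or null when the file is absent.
      static AFsLayout readLayoutFileOrNull(FileSystem fs, Path rootDir)
          throws IOException {
        return null;
      }
    }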
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
index f2b26c1..6e0f151 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHFileLink.java
@@ -18,21 +18,24 @@ package org.apache.hadoop.hbase.io;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.testclassification.IOTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import java.io.IOException;
 import java.util.regex.Matcher;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -83,20 +86,25 @@ public class TestHFileLink {
   }
 
   @Test
-  public void testBackReference() {
+  public void testBackReference() throws IOException {
     Path rootDir = new Path("/root");
     Path archiveDir = new Path(rootDir, ".archive");
     String storeFileName = "121212";
     String linkDir = FileLink.BACK_REFERENCES_DIRECTORY_PREFIX + storeFileName;
-    String encodedRegion = "FEFE";
+    String encodedRegion = "abcabcabcabcabcabcabcabcabcabcab";
     String cf = "cf1";
 
     TableName refTables[] = {TableName.valueOf("refTable"),
         TableName.valueOf("ns", "refTable")};
+
+    Configuration conf = HBaseConfiguration.create();
 
     for(TableName refTable : refTables) {
       Path refTableDir = FSUtils.getTableDir(archiveDir, refTable);
-      Path refRegionDir = HRegion.getRegionDir(refTableDir, encodedRegion);
+      HRegionInfo refHri = HRegionInfo.makeTestInfoWithEncodedName(refTable, encodedRegion);
+      HRegionFileSystem refHrfs = HRegionFileSystem.create(conf,
+          rootDir.getFileSystem(conf), refTableDir, refHri);
+      Path refRegionDir = refHrfs.getRegionDir();
       Path refDir = new Path(refRegionDir, cf);
       Path refLinkDir = new Path(refDir, linkDir);
       String refStoreFileName = refTable.getNameAsString().replace(
@@ -107,7 +115,10 @@ public class TestHFileLink {
 
       for( TableName tableName : tableNames) {
         Path tableDir = FSUtils.getTableDir(rootDir, tableName);
-        Path regionDir = HRegion.getRegionDir(tableDir, encodedRegion);
+        HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(tableName, encodedRegion);
+        HRegionFileSystem hrfs = HRegionFileSystem.create(conf,
+            rootDir.getFileSystem(conf), tableDir, hri);
+        Path regionDir = hrfs.getRegionDir();
         Path cfDir = new Path(regionDir, cf);
 
         //Verify back reference creation
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHumongousHFileLink.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHumongousHFileLink.java
new file mode 100644
index 0000000..07a289b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHumongousHFileLink.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io; + +import org.apache.hadoop.hbase.fs.layout.FsLayout; +import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout; +import org.apache.hadoop.hbase.testclassification.IOTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.experimental.categories.Category; + +@Category({IOTests.class, SmallTests.class}) +public class TestHumongousHFileLink extends TestHFileLink { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + FsLayout.setLayoutForTesting(HierarchicalFsLayout.get()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + FsLayout.reset(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java index 6b68bfe..cbf8695 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java @@ -61,8 +61,10 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.ObserverContext; import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.RegionObserver; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer; import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -581,7 +583,10 @@ public class TestAssignmentManagerOnCluster { FileSystem fs = FileSystem.get(conf); Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table); - Path regionDir = new Path(tableDir, hri.getEncodedName()); + + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri); + Path regionDir = hrfs.getRegionDir(); + // create a file named the same as the region dir to // mess up with region opening fs.create(regionDir, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java index 813eb49..7f32c04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import 
org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.MetaTableAccessor; import org.apache.hadoop.hbase.MiniHBaseCluster; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; @@ -71,7 +72,6 @@ import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ConnectionUtils; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Increment; import org.apache.hadoop.hbase.client.NonceGenerator; import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator; @@ -88,7 +88,7 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; import org.apache.hadoop.hbase.master.SplitLogManager.TaskBatch; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse.CompactionState; -import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.regionserver.HRegionServer; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.regionserver.wal.HLogKey; @@ -120,7 +120,7 @@ import org.junit.experimental.categories.Category; @Category({MasterTests.class, LargeTests.class}) @SuppressWarnings("deprecation") public class TestDistributedLogSplitting { - private static final Log LOG = LogFactory.getLog(TestSplitLogManager.class); + static final Log LOG = LogFactory.getLog(TestSplitLogManager.class); static { // Uncomment the following line if more verbosity is needed for // debugging (see HBASE-12285 for details). @@ -224,7 +224,6 @@ public class TestDistributedLogSplitting { installTable(new ZooKeeperWatcher(conf, "table-creation", null), "table", "family", 40); - TableName table = TableName.valueOf("table"); List regions = null; HRegionServer hrs = null; for (int i = 0; i < NUM_RS; i++) { @@ -258,10 +257,10 @@ public class TestDistributedLogSplitting { int count = 0; for (HRegionInfo hri : regions) { - - Path tdir = FSUtils.getTableDir(rootdir, table); - Path editsdir = - WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName())); + Path tableDir = FSUtils.getTableDir(rootdir, hri.getTable()); + HRegionFileSystem hrfs = HRegionFileSystem.create( + conf, fs, tableDir, hri); + Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir()); LOG.debug("checking edits dir " + editsdir); FileStatus[] files = fs.listStatus(editsdir, new PathFilter() { @Override @@ -849,10 +848,11 @@ public class TestDistributedLogSplitting { int count = 0; FileSystem fs = master.getMasterFileSystem().getFileSystem(); Path rootdir = FSUtils.getRootDir(conf); - Path tdir = FSUtils.getTableDir(rootdir, TableName.valueOf("disableTable")); for (HRegionInfo hri : regions) { - Path editsdir = - WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName())); + Path tableDir = FSUtils.getTableDir(rootdir, hri.getTable()); + HRegionFileSystem hrfs = HRegionFileSystem.create( + conf, fs, tableDir, hri); + Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir()); LOG.debug("checking edits dir " + editsdir); if(!fs.exists(editsdir)) continue; FileStatus[] files = fs.listStatus(editsdir, new PathFilter() { @@ -880,8 +880,10 @@ public class TestDistributedLogSplitting { // clean up for (HRegionInfo hri : 
regions) { - Path editsdir = - WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, hri.getEncodedName())); + Path tableDir = FSUtils.getTableDir(rootdir, hri.getTable()); + HRegionFileSystem hrfs = HRegionFileSystem.create( + conf, fs, tableDir, hri); + Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir()); fs.delete(editsdir, true); } disablingHT.close(); @@ -1403,14 +1405,19 @@ public class TestDistributedLogSplitting { final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null); Table ht = installTable(zkw, "table", "family", 10); FileSystem fs = master.getMasterFileSystem().getFileSystem(); + Path tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), TableName.valueOf("table")); - List regionDirs = FSUtils.getRegionDirs(fs, tableDir); - long newSeqId = WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 1L, 1000L); - WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0) , 1L, 1000L); + + List tableRegions = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), TableName.valueOf("table")); + HRegionInfo hri = tableRegions.get(0); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri); + + long newSeqId = WALSplitter.writeRegionSequenceIdFile(fs, hrfs.getRegionDir(), 1L, 1000L); + WALSplitter.writeRegionSequenceIdFile(fs, hrfs.getRegionDir(), 1L, 1000L); assertEquals(newSeqId + 2000, - WALSplitter.writeRegionSequenceIdFile(fs, regionDirs.get(0), 3L, 1000L)); + WALSplitter.writeRegionSequenceIdFile(fs, hrfs.getRegionDir(), 3L, 1000L)); - Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regionDirs.get(0)); + Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(hrfs.getRegionDir()); FileStatus[] files = FSUtils.listStatus(fs, editsdir, new PathFilter() { @Override public boolean accept(Path p) { @@ -1421,7 +1428,7 @@ public class TestDistributedLogSplitting { assertEquals(1, files.length); // verify all seqId files aren't treated as recovered.edits files - NavigableSet recoveredEdits = WALSplitter.getSplitEditFilesSorted(fs, regionDirs.get(0)); + NavigableSet recoveredEdits = WALSplitter.getSplitEditFilesSorted(fs, hrfs.getRegionDir()); assertEquals(0, recoveredEdits.size()); ht.close(); @@ -1437,7 +1444,8 @@ public class TestDistributedLogSplitting { TableName table = TableName.valueOf(tname); byte [] family = Bytes.toBytes(fname); LOG.info("Creating table with " + nrs + " regions"); - Table ht = TEST_UTIL.createMultiRegionTable(table, family, nrs); + HTableDescriptor desc = new HTableDescriptor(table); + Table ht = TEST_UTIL.createMultiRegionTable(desc, family, nrs); int numRegions = -1; try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(table)) { numRegions = r.getStartKeys().length; @@ -1467,7 +1475,7 @@ public class TestDistributedLogSplitting { assertEquals(numRegions + 2 + existingRegions, regions.size()); return ht; } - + void populateDataInTable(int nrows, String fname) throws Exception { byte [] family = Bytes.toBytes(fname); @@ -1614,7 +1622,7 @@ public class TestDistributedLogSplitting { return count; } - private void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master) throws Exception { + void blockUntilNoRIT(ZooKeeperWatcher zkw, HMaster master) throws Exception { TEST_UTIL.waitUntilNoRegionsInTransition(60000); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHumongousTableDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHumongousTableDistributedLogSplitting.java new 
file mode 100644 index 0000000..834d3d0 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestHumongousTableDistributedLogSplitting.java @@ -0,0 +1,41 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import org.apache.hadoop.hbase.fs.layout.FsLayout; +import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, LargeTests.class}) +public class TestHumongousTableDistributedLogSplitting extends TestDistributedLogSplitting { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + FsLayout.setLayoutForTesting(HierarchicalFsLayout.get()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + FsLayout.reset(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java index 078aaa6..2423a5c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java @@ -36,8 +36,11 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.fs.layout.FsLayout; +import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdge; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -173,11 +176,12 @@ public class TestHFileCleaner { // setup the cleaner FileSystem fs = UTIL.getDFSCluster().getFileSystem(); HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir); - + // make all the directories for archiving files Path table = new Path(archivedHfileDir, "table"); - Path region = new Path(table, "regionsomthing"); - Path family = new Path(region, "fam"); + Path family = HStore.getStoreHomedir(table, "regionsomething", Bytes.toBytes("fam")); + Path region = family.getParent(); + Path file = new Path(family, "file12345"); fs.mkdirs(family); if (!fs.exists(family)) throw new RuntimeException("Couldn't create test family:" + family); diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java index 66874e6..c309523 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java @@ -35,9 +35,12 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.ClusterConnection; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.io.HFileLink; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.testclassification.MasterTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileArchiveUtil; import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; @@ -71,20 +74,18 @@ public class TestHFileLinkCleaner { HRegionInfo hriLink = new HRegionInfo(tableLinkName); Path archiveDir = HFileArchiveUtil.getArchivePath(conf); - Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, - tableName, hri.getEncodedName(), familyName); - Path archiveLinkStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, - tableLinkName, hriLink.getEncodedName(), familyName); + Path tableDir = FSUtils.getTableDir(rootDir, tableName); + Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf, hri, tableDir, Bytes.toBytes(familyName)); // Create hfile /hbase/table-link/region/cf/getEncodedName.HFILE(conf); - Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName); + Path familyPath = getFamilyDirPath(conf, fs, archiveDir, tableName, hri, familyName); fs.mkdirs(familyPath); Path hfilePath = new Path(familyPath, hfileName); fs.createNewFile(hfilePath); // Create link to hfile - Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName, - hriLink.getEncodedName(), familyName); + Path familyLinkPath = getFamilyDirPath(conf, fs, rootDir, tableLinkName, + hriLink, familyName); fs.mkdirs(familyLinkPath); HFileLink.create(conf, fs, familyLinkPath, hri, hfileName); Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName); @@ -124,9 +125,14 @@ public class TestHFileLinkCleaner { assertFalse("Link should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName))); } - private static Path getFamilyDirPath (final Path rootDir, final TableName table, - final String region, final String family) { - return new Path(new Path(FSUtils.getTableDir(rootDir, table), region), family); + private static Path getFamilyDirPath (final Configuration conf, FileSystem fs, + final Path rootDir, final TableName table, + final HRegionInfo hri, final String family) { + Path tableDir = FSUtils.getTableDir(rootDir, table); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri); + Path regionDir = hrfs.getRegionDir(); + Path familyDir = new Path(regionDir, family); + return familyDir; } static class DummyServer implements Server { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHumongousHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHumongousHFileLinkCleaner.java new file mode 100644 index 0000000..894bfb5 --- /dev/null +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHumongousHFileLinkCleaner.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.cleaner; + +import org.apache.hadoop.hbase.fs.layout.FsLayout; +import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.experimental.categories.Category; + +@Category({MasterTests.class, SmallTests.class}) +public class TestHumongousHFileLinkCleaner extends TestHFileLinkCleaner { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + FsLayout.setLayoutForTesting(HierarchicalFsLayout.get()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + FsLayout.reset(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java index 3c070e9..756b181 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java @@ -341,7 +341,7 @@ public class TestSnapshotFromMaster { LOG.debug(fileName); } // get the archived files for the table - Collection files = getArchivedHFiles(archiveDir, rootDir, fs, TABLE_NAME); + Collection files = getArchivedHFiles(archiveDir, rootDir, fs, htd, TABLE_NAME); // and make sure that there is a proper subset for (String fileName : snapshotHFiles) { @@ -368,7 +368,7 @@ public class TestSnapshotFromMaster { LOG.info("After delete snapshot cleaners run File-System state"); FSUtils.logFileSystemState(fs, rootDir, LOG); - files = getArchivedHFiles(archiveDir, rootDir, fs, TABLE_NAME); + files = getArchivedHFiles(archiveDir, rootDir, fs, htd, TABLE_NAME); assertEquals("Still have some hfiles in the archive, when their snapshot has been deleted.", 0, files.size()); } @@ -378,9 +378,9 @@ public class TestSnapshotFromMaster { * @throws IOException on expected failure */ private final Collection getArchivedHFiles(Path archiveDir, Path rootDir, - FileSystem fs, TableName tableName) throws IOException { + FileSystem fs, HTableDescriptor desc, TableName tableName) throws IOException { Path tableArchive = FSUtils.getTableDir(archiveDir, tableName); - Path[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, tableArchive); + Path[] archivedHFiles = SnapshotTestingUtils.listHFiles(fs, desc, tableArchive); List files = new ArrayList(archivedHFiles.length); LOG.debug("Have archived 
hfiles: " + tableArchive); for (Path file : archivedHFiles) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java index c82aecc..8d68bdb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java @@ -20,10 +20,12 @@ package org.apache.hadoop.hbase.master.procedure; import java.io.IOException; import java.util.concurrent.atomic.AtomicInteger; +import java.util.ArrayList; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HColumnDescriptor; @@ -45,6 +47,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.TableStateManager; import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; import org.apache.hadoop.hbase.util.ModifyRegionUtils; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FSUtils; @@ -79,21 +82,29 @@ public class MasterProcedureTestingUtility { return regions; } - public static void validateTableCreation(final HMaster master, final TableName tableName, + public static void validateTableCreation(final Configuration conf, final Connection conn, + final HMaster master, final TableName tableName, final HRegionInfo[] regions, String... family) throws IOException { - validateTableCreation(master, tableName, regions, true, family); + validateTableCreation(conf, conn, master, tableName, regions, true, family); } - public static void validateTableCreation(final HMaster master, final TableName tableName, + public static void validateTableCreation(final Configuration conf, final Connection conn, + final HMaster master, final TableName tableName, final HRegionInfo[] regions, boolean hasFamilyDirs, String... 
family) throws IOException { // check filesystem final FileSystem fs = master.getMasterFileSystem().getFileSystem(); final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); assertTrue(fs.exists(tableDir)); FSUtils.logFileSystemState(fs, tableDir, LOG); - List allRegionDirs = FSUtils.getRegionDirs(fs, tableDir); + List allRegionDirs = new ArrayList(); + List regionFileSystems = master.getMasterFileSystem().getRegionFileSystems( + conf, conn, tableName); + for (HRegionFileSystem hrfs : regionFileSystems) { + allRegionDirs.add(hrfs.getRegionDir()); + } for (int i = 0; i < regions.length; ++i) { - Path regionDir = new Path(tableDir, regions[i].getEncodedName()); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, regions[i]); + Path regionDir = hrfs.getRegionDir(); assertTrue(regions[i] + " region dir does not exist", fs.exists(regionDir)); assertTrue(allRegionDirs.remove(regionDir)); List allFamilyDirs = FSUtils.getFamilyDirs(fs, regionDir); @@ -363,7 +374,8 @@ public class MasterProcedureTestingUtility { assertTrue(htd.getHTableDescriptor().hasFamily(family.getBytes())); } - public static void validateColumnFamilyDeletion(final HMaster master, final TableName tableName, + public static void validateColumnFamilyDeletion(final Configuration conf, final Connection conn, + final HMaster master, final TableName tableName, final String family) throws IOException { // verify htd TableDescriptor htd = master.getTableDescriptors().getDescriptor(tableName); @@ -372,9 +384,10 @@ public class MasterProcedureTestingUtility { // verify fs final FileSystem fs = master.getMasterFileSystem().getFileSystem(); - final Path tableDir = FSUtils.getTableDir(master.getMasterFileSystem().getRootDir(), tableName); - for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { - final Path familyDir = new Path(regionDir, family); + List regions = master.getMasterFileSystem().getRegionFileSystems( + conf, conn, tableName); + for (HRegionFileSystem hrfs : regions) { + final Path familyDir = new Path(hrfs.getRegionDir(), family); assertFalse(family + " family dir should not exist", fs.exists(familyDir)); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java index 1490aa1..47f0b13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestAddColumnFamilyProcedure.java @@ -236,8 +236,9 @@ public class TestAddColumnFamilyProcedure { MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps, AddColumnFamilyState.values()); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf6); + MasterProcedureTestingUtility.validateColumnFamilyDeletion( + UTIL.getConfiguration(), UTIL.getConnection(), + UTIL.getHBaseCluster().getMaster(), tableName, cf6); } private ProcedureExecutor getMasterProcedureExecutor() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java index 7cd64b6..4d20546 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateTableProcedure.java @@ -113,6 +113,7 @@ public class TestCreateTableProcedure { HRegionInfo[] regions = MasterProcedureTestingUtility.createTable( getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2"); MasterProcedureTestingUtility.validateTableCreation( + UTIL.getConfiguration(), UTIL.getConnection(), UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); } @@ -161,6 +162,7 @@ public class TestCreateTableProcedure { procExec, procId, 6, CreateTableState.values()); MasterProcedureTestingUtility.validateTableCreation( + UTIL.getConfiguration(), UTIL.getConnection(), UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2"); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java index dcf1940..eeeea0d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java @@ -97,8 +97,9 @@ public class TestDeleteColumnFamilyProcedure { ProcedureTestingUtility.waitProcedure(procExec, procId1); ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf1); + MasterProcedureTestingUtility.validateColumnFamilyDeletion( + UTIL.getConfiguration(), UTIL.getConnection(), + UTIL.getHBaseCluster().getMaster(), tableName, cf1); // Test 2: delete the column family that exists offline UTIL.getHBaseAdmin().disableTable(tableName); @@ -128,8 +129,9 @@ public class TestDeleteColumnFamilyProcedure { // First delete should succeed ProcedureTestingUtility.assertProcNotFailed(procExec, procId1); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf2); + MasterProcedureTestingUtility.validateColumnFamilyDeletion( + UTIL.getConfiguration(), UTIL.getConnection(), + UTIL.getHBaseCluster().getMaster(), tableName, cf2); // delete the column family that does not exist long procId2 = @@ -204,8 +206,9 @@ public class TestDeleteColumnFamilyProcedure { MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, DeleteColumnFamilyState.values()); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf4); + MasterProcedureTestingUtility.validateColumnFamilyDeletion( + UTIL.getConfiguration(), UTIL.getConnection(), + UTIL.getHBaseCluster().getMaster(), tableName, cf4); } @Test(timeout = 60000) @@ -230,8 +233,9 @@ public class TestDeleteColumnFamilyProcedure { MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps, DeleteColumnFamilyState.values()); - MasterProcedureTestingUtility.validateColumnFamilyDeletion(UTIL.getHBaseCluster().getMaster(), - tableName, cf5); + MasterProcedureTestingUtility.validateColumnFamilyDeletion( + UTIL.getConfiguration(), UTIL.getConnection(), + UTIL.getHBaseCluster().getMaster(), tableName, cf5); } @Test(timeout = 60000) @@ -262,6 +266,7 @@ public class TestDeleteColumnFamilyProcedure { DeleteColumnFamilyState.values()); MasterProcedureTestingUtility.validateTableCreation( + UTIL.getConfiguration(), UTIL.getConnection(), UTIL.getHBaseCluster().getMaster(), 
       tableName, regions, "f1", "f2", "f3", cf5);
   }
 
@@ -293,6 +298,7 @@ public class TestDeleteColumnFamilyProcedure {
       DeleteColumnFamilyState.values());
 
     MasterProcedureTestingUtility.validateColumnFamilyDeletion(
+      UTIL.getConfiguration(), UTIL.getConnection(),
       UTIL.getHBaseCluster().getMaster(), tableName, cf5);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
index 2576302..53d4f5c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterFailoverWithProcedures.java
@@ -174,6 +174,7 @@ public class TestMasterFailoverWithProcedures {
     testRecoveryAndDoubleExecution(UTIL, procId, step, CreateTableState.values());
 
     MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
       UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
   }
 
@@ -199,6 +200,7 @@ public class TestMasterFailoverWithProcedures {
       getMasterProcedureExecutor(), tableName, splitKeys, "f1", "f2");
     Path tableDir = FSUtils.getTableDir(getRootDir(), tableName);
     MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
       UTIL.getHBaseCluster().getMaster(), tableName, regions, "f1", "f2");
 
     UTIL.getHBaseAdmin().disableTable(tableName);
@@ -265,6 +267,7 @@ public class TestMasterFailoverWithProcedures {
       assertEquals(1, regions.length);
     }
     MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
       UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
 
     // verify that there are no rows in the table
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
index af29338..8962a81 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyTableProcedure.java
@@ -241,7 +241,9 @@ public class TestModifyTableProcedure {
     assertEquals(2, currentHtd.getFamiliesKeys().size());
 
     // cf2 should be added cf3 should be removed
-    MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+    MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
+      UTIL.getHBaseCluster().getMaster(),
       tableName, regions, false, "cf1", cf2);
   }
 
@@ -282,7 +284,9 @@ public class TestModifyTableProcedure {
     assertFalse(currentHtd.hasFamily(cf3.getBytes()));
 
     // cf2 should be added cf3 should be removed
-    MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+    MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
+      UTIL.getHBaseCluster().getMaster(),
       tableName, regions, "cf1", cf2);
   }
 
@@ -316,7 +320,9 @@ public class TestModifyTableProcedure {
       ModifyTableState.values());
 
     // cf2 should not be present
-    MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+    MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
+      UTIL.getHBaseCluster().getMaster(),
       tableName, regions, "cf1");
   }
 
@@ -353,7 +359,9 @@ public class TestModifyTableProcedure {
       ModifyTableState.values());
 
     // cf2 should not be present
-    MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+    MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
+      UTIL.getHBaseCluster().getMaster(),
       tableName, regions, "cf1");
   }
 
@@ -393,7 +401,9 @@ public class TestModifyTableProcedure {
       ModifyTableState.values());
 
     // "cf2" should be added and "cf1" should be removed
-    MasterProcedureTestingUtility.validateTableCreation(UTIL.getHBaseCluster().getMaster(),
+    MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
+      UTIL.getHBaseCluster().getMaster(),
       tableName, regions, false, familyToAddName);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
index 58acbae..d448fe8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
@@ -164,6 +164,7 @@ public class TestTruncateTableProcedure {
       assertEquals(1, regions.length);
     }
     MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
       UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
 
     // verify that there are no rows in the table
@@ -229,6 +230,7 @@ public class TestTruncateTableProcedure {
       assertEquals(1, regions.length);
     }
     MasterProcedureTestingUtility.validateTableCreation(
+      UTIL.getConfiguration(), UTIL.getConnection(),
       UTIL.getHBaseCluster().getMaster(), tableName, regions, families);
 
     // verify that there are no rows in the table
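Every Test*Humongous* suite added below follows one template: inherit an existing suite unchanged and swap the filesystem layout around it, so the entire inherited test body runs against the hierarchical on-disk structure. A hypothetical instance of the template (TestFoo stands in for any existing suite; only the FsLayout calls are real APIs from this patch):

    package org.apache.hadoop.hbase.regionserver;

    import org.apache.hadoop.hbase.fs.layout.FsLayout;
    import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    // Hypothetical example: rerun an existing suite (TestFoo) under the
    // hierarchical layout. Only the static layout switch differs; every
    // inherited @Test exercises the alternate region-directory structure.
    public class TestHumongousFoo extends TestFoo {

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        FsLayout.reset(); // restore the standard layout for later suites
      }
    }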
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactHumongousRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactHumongousRegion.java
new file mode 100644
index 0000000..4ec5318
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactHumongousRegion.java
@@ -0,0 +1,41 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({RegionServerTests.class, MediumTests.class})
+public class TestCompactHumongousRegion extends TestCompaction {
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    FsLayout.reset();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index fcc9fc3..6464db4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -86,11 +86,11 @@ import org.mockito.stubbing.Answer;
 public class TestCompaction {
   @Rule public TestName name = new TestName();
   private static final Log LOG = LogFactory.getLog(TestCompaction.class.getName());
-  private static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
+  static final HBaseTestingUtility UTIL = HBaseTestingUtility.createLocalHTU();
   protected Configuration conf = UTIL.getConfiguration();
 
-  private HRegion r = null;
-  private HTableDescriptor htd = null;
+  HRegion r = null;
+  HTableDescriptor htd = null;
   private static final byte [] COLUMN_FAMILY = fam1;
   private final byte [] STARTROW = Bytes.toBytes(START_KEY);
   private static final byte [] COLUMN_FAMILY_TEXT = COLUMN_FAMILY;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
index 5f792fa..85182e5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.util.Progressable;
-
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -77,7 +76,7 @@ public class TestHRegionFileSystem {
     assertEquals(regionDir, regionFs.getRegionDir());
 
     // Delete the region
-    HRegionFileSystem.deleteRegionFromFileSystem(conf, fs,
+    HRegionFileSystem.deleteAndArchiveRegionFromFileSystem(conf, fs,
       FSUtils.getTableDir(rootDir, hri.getTable()), hri);
     assertFalse("The region folder should be removed", fs.exists(regionDir));
 
@@ -95,7 +94,7 @@ public class TestHRegionFileSystem {
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);
     assertTrue(fs.exists(regionFs.getRegionDir()));
 
-    regionFs = new HRegionFileSystem(conf, new MockFileSystemForCreate(),
+    regionFs = HRegionFileSystem.create(conf, new MockFileSystemForCreate(),
       null, null);
     // HRegionFileSystem.createRegionOnFileSystem(conf, new MockFileSystemForCreate(), rootDir,
     // hri);
@@ -103,11 +102,11 @@ public class TestHRegionFileSystem {
assertTrue("Couldn't create the directory", result); - regionFs = new HRegionFileSystem(conf, new MockFileSystem(), null, null); + regionFs = HRegionFileSystem.create(conf, new MockFileSystem(), null, null); result = regionFs.rename(new Path("/foo/bar"), new Path("/foo/bar2")); assertTrue("Couldn't rename the directory", result); - regionFs = new HRegionFileSystem(conf, new MockFileSystem(), null, null); + regionFs = HRegionFileSystem.create(conf, new MockFileSystem(), null, null); result = regionFs.deleteDir(new Path("/foo/bar")); assertTrue("Couldn't delete the directory", result); fs.delete(rootDir, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMerge.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMerge.java new file mode 100644 index 0000000..7d05fcd --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMerge.java @@ -0,0 +1,41 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.fs.layout.FsLayout; +import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.experimental.categories.Category; + +@Category({RegionServerTests.class, SmallTests.class}) +public class TestHumongousRegionMerge extends TestRegionMergeTransaction { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + FsLayout.setLayoutForTesting(HierarchicalFsLayout.get()); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + FsLayout.reset(); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMergeTransactionOnCluster.java new file mode 100644 index 0000000..fb3095d --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMergeTransactionOnCluster.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMergeTransactionOnCluster.java
new file mode 100644
index 0000000..fb3095d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousRegionMergeTransactionOnCluster.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({RegionServerTests.class, LargeTests.class})
+public class TestHumongousRegionMergeTransactionOnCluster extends TestRegionMergeTransactionOnCluster {
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    FsLayout.reset();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousStoreFile.java
new file mode 100644
index 0000000..869e179
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHumongousStoreFile.java
@@ -0,0 +1,41 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({RegionServerTests.class, SmallTests.class})
+public class TestHumongousStoreFile extends TestStoreFile {
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    FsLayout.reset();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
index c339bb1..75fc2f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -102,7 +103,7 @@ public class TestRecoveredEdits {
     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
     Path tableDir = FSUtils.getTableDir(hbaseRootDir, htd.getTableName());
     HRegionFileSystem hrfs =
-        new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
+        HRegionFileSystem.create(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
     if (fs.exists(hrfs.getRegionDir())) {
       LOG.info("Region directory already exists. Deleting.");
       fs.delete(hrfs.getRegionDir(), true);
@@ -113,7 +114,7 @@ public class TestRecoveredEdits {
     // There should be no store files.
     assertTrue(storeFiles.isEmpty());
     region.close();
-    Path regionDir = region.getRegionDir(hbaseRootDir, hri);
+    Path regionDir = hrfs.getRegionDir();
     Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
     // This is a little fragile getting this path to a file of 10M of edits.
     Path recoveredEditsFile = new Path(new Path(
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
index b2115b3..3dfe1ec 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
@@ -64,7 +64,7 @@ import com.google.common.collect.ImmutableList;
  */
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestRegionMergeTransaction {
-  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final Path testdir = TEST_UTIL.getDataTestDir(this.getClass().getName());
   private HRegion region_a;
@@ -77,7 +77,7 @@ public class TestRegionMergeTransaction {
   private static final byte[] STARTROW_B = new byte[] { 'g', 'g', 'g' };
   private static final byte[] STARTROW_C = new byte[] { 'w', 'w', 'w' };
   private static final byte[] ENDROW = new byte[] { '{', '{', '{' };
-  private static final byte[] CF = HConstants.CATALOG_FAMILY;
+  static final byte[] CF = HConstants.CATALOG_FAMILY;
 
   @Before
   public void setup() throws IOException {
@@ -291,9 +291,11 @@ public class TestRegionMergeTransaction {
     int rowCountOfRegionB2 = countRows(this.region_b);
     assertEquals(rowCountOfRegionB, rowCountOfRegionB2);
 
+    Path tableDir = this.region_a.getRegionFileSystem().getTableDir();
+
     // Assert rollback cleaned up stuff in fs
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir,
-        mt.getMergedRegionInfo())));
+    assertTrue(!HRegionFileSystem.create(TEST_UTIL.getConfiguration(),
+        fs, tableDir, mt.getMergedRegionInfo()).existsOnDisk());
     assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
     assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
 
@@ -348,10 +350,12 @@ public class TestRegionMergeTransaction {
     // Make sure that merged region is still in the filesystem, that
     // they have not been removed; this is supposed to be the case if we go
     // past point of no return.
-    Path tableDir = this.region_a.getRegionFileSystem().getRegionDir()
-        .getParent();
-    Path mergedRegionDir = new Path(tableDir, mt.getMergedRegionInfo()
-        .getEncodedName());
+    Path tableDir = this.region_a.getRegionFileSystem().getTableDir();
+
+    HRegionFileSystem mergedRegionFilesystem = HRegionFileSystem.openRegionFromFileSystem(
+        TEST_UTIL.getConfiguration(), fs, tableDir, mt.getMergedRegionInfo(), true);
+
+    Path mergedRegionDir = mergedRegionFilesystem.getRegionDir();
     assertTrue(TEST_UTIL.getTestFileSystem().exists(mergedRegionDir));
   }
 
@@ -404,7 +408,7 @@ public class TestRegionMergeTransaction {
   private class MockedFailedMergedRegionOpen extends IOException {
   }
 
-  private HRegion createRegion(final Path testdir, final WALFactory wals,
+  HRegion createRegion(final Path testdir, final WALFactory wals,
       final byte[] startrow, final byte[] endrow)
   throws IOException {
     // Make a region with start and end keys.
@@ -418,7 +422,7 @@ public class TestRegionMergeTransaction {
     return HRegion.openHRegion(testdir, hri, htd,
       wals.getWAL(hri.getEncodedNameAsBytes()), TEST_UTIL.getConfiguration());
   }
-  
+
   private int countRows(final HRegion r) throws IOException {
     int rowcount = 0;
     InternalScanner scanner = r.getScanner(new Scan());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
index 2a949a1..b7133a8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransactionOnCluster.java
@@ -232,15 +232,17 @@ public class TestRegionMergeTransactionOnCluster {
       Path rootDir = master.getMasterFileSystem().getRootDir();
       Path tabledir = FSUtils.getTableDir(rootDir, mergedRegionInfo.getTable());
-      Path regionAdir = new Path(tabledir, regionA.getEncodedName());
-      Path regionBdir = new Path(tabledir, regionB.getEncodedName());
-      assertTrue(fs.exists(regionAdir));
-      assertTrue(fs.exists(regionBdir));
+      HRegionFileSystem hrfsA = HRegionFileSystem.create(
+          TEST_UTIL.getConfiguration(), fs, tabledir, regionA);
+      HRegionFileSystem hrfsB = HRegionFileSystem.create(
+          TEST_UTIL.getConfiguration(), fs, tabledir, regionB);
+      assertTrue(hrfsA.existsOnDisk());
+      assertTrue(hrfsB.existsOnDisk());
 
       admin.compactRegion(mergedRegionInfo.getRegionName());
       // wait until merged region doesn't have reference file
       long timeout = System.currentTimeMillis() + waitTime;
-      HRegionFileSystem hrfs = new HRegionFileSystem(
+      HRegionFileSystem hrfs = HRegionFileSystem.create(
           TEST_UTIL.getConfiguration(), fs, tabledir, mergedRegionInfo);
       while (System.currentTimeMillis() < timeout) {
         if (!hrfs.hasReferences(tableDescritor)) {
@@ -254,8 +256,8 @@ public class TestRegionMergeTransactionOnCluster {
       // files of merging regions
       int cleaned = admin.runCatalogScan();
       assertTrue(cleaned > 0);
-      assertFalse(fs.exists(regionAdir));
-      assertFalse(fs.exists(regionBdir));
+      assertFalse(hrfsA.existsOnDisk());
+      assertFalse(hrfsB.existsOnDisk());
 
       mergedRegionResult = MetaTableAccessor.getRegionResult(
         master.getConnection(), mergedRegionInfo.getRegionName());
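The cluster merge test above replaces fs.exists(new Path(tabledir, encodedName)) with HRegionFileSystem.existsOnDisk(), which first resolves the region directory through the active layout. A hedged sketch of that presence check (the helper name is illustrative; create() and existsOnDisk() are the calls shown in this diff):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

    class RegionPresenceSketch {
      // True if the region's directory exists, wherever the current
      // FsLayout places it (flat or bucketed under the table dir).
      static boolean regionExists(Configuration conf, FileSystem fs,
          Path tableDir, HRegionInfo hri) throws IOException {
        return HRegionFileSystem.create(conf, fs, tableDir, hri).existsOnDisk();
      }
    }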
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitHumongousTableTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitHumongousTableTransactionOnCluster.java
new file mode 100644
index 0000000..26462d0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitHumongousTableTransactionOnCluster.java
@@ -0,0 +1,41 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({RegionServerTests.class, LargeTests.class})
+public class TestSplitHumongousTableTransactionOnCluster extends TestSplitTransactionOnCluster {
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    FsLayout.reset();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index 4f371bd..55f806e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
@@ -22,10 +22,11 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.eq;
+import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.eq;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
@@ -71,17 +72,17 @@ import com.google.common.collect.ImmutableList;
  */
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestSplitTransaction {
-  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final Path testdir = TEST_UTIL.getDataTestDir(this.getClass().getName());
   private HRegion parent;
   private WALFactory wals;
   private FileSystem fs;
-  private static final byte [] STARTROW = new byte [] {'a', 'a', 'a'};
+  static final byte [] STARTROW = new byte [] {'a', 'a', 'a'};
   // '{' is next ascii after 'z'.
-  private static final byte [] ENDROW = new byte [] {'{', '{', '{'};
+  static final byte [] ENDROW = new byte [] {'{', '{', '{'};
   private static final byte [] GOOD_SPLIT_ROW = new byte [] {'d', 'd', 'd'};
-  private static final byte [] CF = HConstants.CATALOG_FAMILY;
+  static final byte [] CF = HConstants.CATALOG_FAMILY;
   private static boolean preRollBackCalled = false;
   private static boolean postRollBackCalled = false;
@@ -145,9 +146,8 @@ public class TestSplitTransaction {
     // Make sure that region a and region b are still in the filesystem, that
     // they have not been removed; this is supposed to be the case if we go
     // past point of no return.
-    Path tableDir = this.parent.getRegionFileSystem().getTableDir();
-    Path daughterADir = new Path(tableDir, spiedUponSt.getFirstDaughter().getEncodedName());
-    Path daughterBDir = new Path(tableDir, spiedUponSt.getSecondDaughter().getEncodedName());
+    Path daughterADir = this.parent.getRegionFileSystem().getDaughterRegionDir(spiedUponSt.getFirstDaughter());
+    Path daughterBDir = this.parent.getRegionFileSystem().getDaughterRegionDir(spiedUponSt.getSecondDaughter());
     assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterADir));
     assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterBDir));
   }
 
@@ -269,9 +269,8 @@ public class TestSplitTransaction {
     SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(spiedRegion);
     SplitTransactionImpl spiedUponSt = spy(st);
     doThrow(new IOException("Failing split. Expected reference file count isn't equal."))
-        .when(spiedUponSt).assertReferenceFileCount(anyInt(),
-        eq(new Path(this.parent.getRegionFileSystem().getTableDir(),
-            st.getSecondDaughter().getEncodedName())));
+        .when(spiedUponSt).assertReferenceFileCountOfDaughterDir(anyInt(),
+        eq(st.getSecondDaughter()));
 
     // Run the execute.  Look at what it returns.
     boolean expectedException = false;
@@ -296,8 +295,8 @@ public class TestSplitTransaction {
     HRegion spiedRegion = spy(this.parent);
     SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(spiedRegion);
     SplitTransactionImpl spiedUponSt = spy(st);
-    doNothing().when(spiedUponSt).assertReferenceFileCount(anyInt(),
-        eq(parent.getRegionFileSystem().getSplitsDir(st.getFirstDaughter())));
+    doNothing().when(spiedUponSt).assertReferenceFileCountOfSplitsDir(anyInt(),
+        eq(st.getFirstDaughter()));
     when(spiedRegion.createDaughterRegionFromSplits(spiedUponSt.getSecondDaughter())).
         thenThrow(new MockedFailedDaughterCreation());
     // Run the execute.  Look at what it returns.
@@ -318,8 +317,8 @@ public class TestSplitTransaction {
     assertEquals(parentRowCount, parentRowCount2);
 
     // Assert rollback cleaned up stuff in fs
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, st.getFirstDaughter())));
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, st.getSecondDaughter())));
+    assertTrue(!this.fs.exists(spiedRegion.getRegionFileSystem().getDaughterRegionDir(st.getFirstDaughter())));
+    assertTrue(!this.fs.exists(spiedRegion.getRegionFileSystem().getDaughterRegionDir(st.getSecondDaughter())));
     assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
 
     // Now retry the split but do not throw an exception this time.
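The split assertions stop computing daughter paths by hand and instead ask HRegionFileSystem.getDaughterRegionDir() where a daughter will land under the current layout. A small sketch, assuming only the accessor shown in this diff (class and method names of the sketch are illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    class DaughterDirSketch {
      // Where would the daughter region land after a split? Under the
      // standard layout this is tableDir/<encodedName>; the hierarchical
      // layout inserts intermediate bucket directories, so callers must
      // ask the region filesystem rather than build the Path themselves.
      static Path daughterDir(HRegion parent, HRegionInfo daughter) {
        return parent.getRegionFileSystem().getDaughterRegionDir(daughter);
      }
    }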
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionForHumongousTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionForHumongousTable.java
new file mode 100644
index 0000000..619b52b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionForHumongousTable.java
@@ -0,0 +1,41 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({RegionServerTests.class, SmallTests.class})
+public class TestSplitTransactionForHumongousTable extends TestSplitTransaction {
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    FsLayout.reset();
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 2fe5654..00b1040 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -76,6 +76,7 @@ import org.apache.hadoop.hbase.coordination.ZkCoordinatedStateManager;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
@@ -982,8 +983,8 @@ public class TestSplitTransactionOnCluster {
       FSUtils.getTableDir(cluster.getMaster().getMasterFileSystem().getRootDir(), desc.getTableName());
     tableDir.getFileSystem(cluster.getConfiguration());
-    List<Path> regionDirs =
-      FSUtils.getRegionDirs(tableDir.getFileSystem(cluster.getConfiguration()), tableDir);
+    List<Path> regionDirs = cluster.getMaster().getMasterFileSystem()
+      .getRegionDirs(desc.getTableName());
     assertEquals(3,regionDirs.size());
     cluster.startRegionServer();
     regionServer.kill();
@@ -996,8 +997,8 @@ public class TestSplitTransactionOnCluster {
     AssignmentManager am = cluster.getMaster().getAssignmentManager();
     assertEquals(am.getRegionStates().getRegionsInTransition().toString(), 0, am
       .getRegionStates().getRegionsInTransition().size());
-    regionDirs =
-      FSUtils.getRegionDirs(tableDir.getFileSystem(cluster.getConfiguration()), tableDir);
+    regionDirs = cluster.getMaster().getMasterFileSystem()
+      .getRegionDirs(desc.getTableName());
     assertEquals(1,regionDirs.size());
   } finally {
     TESTING_UTIL.deleteTable(table);
@@ -1255,7 +1256,7 @@ public class TestSplitTransactionOnCluster {
     }
   }
 
-  private List<HRegion> awaitTableRegions(final TableName tableName) throws InterruptedException {
+  List<HRegion> awaitTableRegions(final TableName tableName) throws InterruptedException {
    List<HRegion> regions = null;
    for (int i = 0; i < 100; i++) {
      regions = cluster.getRegions(tableName);
@@ -1265,15 +1266,16 @@ public class TestSplitTransactionOnCluster {
     return regions;
   }
 
-  private Table createTableAndWait(TableName tableName, byte[] cf) throws IOException,
+  Table createTableAndWait(TableName tableName, byte[] cf) throws IOException,
       InterruptedException {
-    Table t = TESTING_UTIL.createTable(tableName, cf);
+    HTableDescriptor desc = new HTableDescriptor(tableName);
+    HTable t = TESTING_UTIL.createTable(desc, new byte[][]{cf}, TESTING_UTIL.getConfiguration());
     awaitTableRegions(tableName);
     assertTrue("Table not online: " + tableName,
       cluster.getRegions(tableName).size() != 0);
     return t;
   }
-  
+
   private static class SplittingNodeCreationFailedException extends IOException {
     private static final long serialVersionUID = 1652404976265623004L;
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 499e57c..c6dbbc7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
@@ -98,10 +99,10 @@ public class TestStoreFile extends HBaseTestCase {
    */
   @Test
   public void testBasicHalfMapFile() throws Exception {
-    final HRegionInfo hri =
-        new HRegionInfo(TableName.valueOf("testBasicHalfMapFileTb"));
+    final HRegionInfo hri = createHRegionInfo(TableName.valueOf("testBasicHalfMapFileTb"), null, null);
+    Path tableDir = FSUtils.getTableDir(this.testDir, TableName.valueOf("testBasicHalfMapFileTb"));
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
-      conf, fs, new Path(testDir, hri.getTable().getNameAsString()), hri);
+      conf, fs, tableDir, hri);
     HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build();
     StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
@@ -151,9 +152,10 @@ public class TestStoreFile extends HBaseTestCase {
    */
   @Test
   public void testReference() throws IOException {
-    final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testReferenceTb"));
+    final HRegionInfo hri = createHRegionInfo(TableName.valueOf("testReferenceTb"), null, null);
+    Path tableDir = FSUtils.getTableDir(this.testDir, TableName.valueOf("testReferenceTb"));
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
-      conf, fs, new Path(testDir, hri.getTable().getNameAsString()), hri);
+      conf, fs, tableDir, hri);
     HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
 
     // Make a store file and write data to it.
@@ -175,7 +177,7 @@ public class TestStoreFile extends HBaseTestCase {
     kv = KeyValueUtil.createKeyValueFromKey(reader.getLastKey());
     byte [] finalRow = kv.getRow();
     // Make a reference
-    HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
+    HRegionInfo splitHri = createHRegionInfo(hri.getTable(), null, midRow);
     Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
     StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf, BloomType.NONE);
@@ -195,7 +197,7 @@ public class TestStoreFile extends HBaseTestCase {
 
   @Test
   public void testHFileLink() throws IOException {
-    final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
+    final HRegionInfo hri = createHRegionInfo(TableName.valueOf("testHFileLinkTb"), null, null);
     // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
     Configuration testConf = new Configuration(this.conf);
     FSUtils.setRootDir(testConf, testDir);
@@ -211,7 +213,7 @@ public class TestStoreFile extends HBaseTestCase {
     writeStoreFile(writer);
     Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
 
-    Path dstPath = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
+    Path dstPath = createDestinationPath(regionFs);
     HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
     Path linkFilePath = new Path(dstPath,
         HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
@@ -243,7 +245,7 @@ public class TestStoreFile extends HBaseTestCase {
     FSUtils.setRootDir(testConf, testDir);
 
     // adding legal table name chars to verify regex handles it.
-    HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
+    HRegionInfo hri = createHRegionInfo(TableName.valueOf("_original-evil-name"), null, null);
     HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
       testConf, fs, FSUtils.getTableDir(testDir, hri.getTable()), hri);
 
@@ -257,7 +259,7 @@ public class TestStoreFile extends HBaseTestCase {
     Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
 
     // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
-    HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
+    HRegionInfo hriClone = createHRegionInfo(TableName.valueOf("clone"), null, null);
     HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
       testConf, fs, FSUtils.getTableDir(testDir, hri.getTable()), hriClone);
@@ -269,8 +271,8 @@ public class TestStoreFile extends HBaseTestCase {
     // create splits of the link.
     // <root>/clone/splitA/<cf>/<reftohfilelink>,
     // <root>/clone/splitB/<cf>/<reftohfilelink>
-    HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
-    HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
+    HRegionInfo splitHriA = createHRegionInfo(hri.getTable(), null, SPLITKEY);
+    HRegionInfo splitHriB = createHRegionInfo(hri.getTable(), SPLITKEY, null);
     StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
     Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
     Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
@@ -318,11 +320,11 @@ public class TestStoreFile extends HBaseTestCase {
     KeyValue midKV = (KeyValue)midkey;
     byte [] midRow = midKV.getRow();
     // Create top split.
-    HRegionInfo topHri = new HRegionInfo(regionFs.getRegionInfo().getTable(),
+    HRegionInfo topHri = createHRegionInfo(regionFs.getRegionInfo().getTable(),
         null, midRow);
     Path topPath = splitStoreFile(regionFs, topHri, TEST_FAMILY, f, midRow, true);
     // Create bottom split.
-    HRegionInfo bottomHri = new HRegionInfo(regionFs.getRegionInfo().getTable(),
+    HRegionInfo bottomHri = createHRegionInfo(regionFs.getRegionInfo().getTable(),
         midRow, null);
     Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
     // Make readers on top and bottom.
@@ -1037,5 +1039,19 @@ public class TestStoreFile extends HBaseTestCase {
     byte[] value = fileInfo.get(HFileDataBlockEncoder.DATA_BLOCK_ENCODING);
     assertEquals(dataBlockEncoderAlgo.getNameInBytes(), value);
   }
+
+  HRegionInfo createHRegionInfo(final TableName tableName, final byte[] startKey,
+      final byte[] endKey) {
+    return new HRegionInfo(tableName, startKey, endKey);
+  }
+
+  String getTestRegionName() {
+    String regionName = "d9ffc3a5cd016ae58e23d7a6cb937949";
+    return regionName;
+  }
+
+  Path createDestinationPath(HRegionFileSystem regionFs) {
+    return HStore.getStoreHomedir(regionFs.getTableDir(), regionFs.getRegionInfo(), Bytes.toBytes(TEST_FAMILY));
+  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java
index 3bb0384..8667d0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java
@@ -40,7 +40,9 @@ import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -55,7 +57,8 @@ public class TestStripeStoreFileManager {
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static final Path BASEDIR =
       TEST_UTIL.getDataTestDir(TestStripeStoreFileManager.class.getSimpleName());
-  private static final Path CFDIR = HStore.getStoreHomedir(BASEDIR, "region", Bytes.toBytes("cf"));
+  private static final HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestStripeStoreFileManager"));
+  private static final Path CFDIR = HStore.getStoreHomedir(BASEDIR, hri, Bytes.toBytes("cf"));
 
   private static final byte[] KEY_A = Bytes.toBytes("aaa");
   private static final byte[] KEY_B = Bytes.toBytes("bbb");
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHumongousWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHumongousWALReplay.java
new file mode 100644
index 0000000..d81b822
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHumongousWALReplay.java
@@ -0,0 +1,46 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.wal;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({RegionServerTests.class, MediumTests.class})
+public class TestHumongousWALReplay extends TestWALReplay {
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+    TestWALReplay.setUpBeforeClass();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    try {
+      TestWALReplay.tearDownAfterClass();
+    } finally {
+      FsLayout.reset();
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index c943d12..dfe2e6a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
 import org.apache.hadoop.hbase.regionserver.FlushRequestListener;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -901,9 +902,12 @@ public class TestWALReplay {
     assertTrue(listStatus.length > 0);
     WALSplitter.splitLogFile(hbaseRootDir, listStatus[0],
         this.fs, this.conf, null, null, null, mode, wals);
+    Path tableDir = FSUtils.getTableDir(hbaseRootDir, tableName);
+    HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri);
+    Path regionDir = hrfs.getRegionDir();
     FileStatus[] listStatus1 = this.fs.listStatus(
-      new Path(FSUtils.getTableDir(hbaseRootDir, tableName), new Path(hri.getEncodedName(),
-          "recovered.edits")), new PathFilter() {
+      new Path(regionDir,
+          "recovered.edits"), new PathFilter() {
         @Override
         public boolean accept(Path p) {
           if (WALSplitter.isSequenceIdFile(p)) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index d7b552e..6d4a107 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -331,22 +332,22 @@ public class SnapshotTestingUtils {
    * @return array of the current HFiles in the table (could be a zero-length array)
    * @throws IOException on unexecpted error reading the FS
    */
-  public static Path[] listHFiles(final FileSystem fs, final Path tableDir)
+  public static Path[] listHFiles(final FileSystem fs, HTableDescriptor desc, final Path tableDir)
       throws IOException {
     final ArrayList<Path> hfiles = new ArrayList<Path>();
     FSVisitor.visitTableStoreFiles(fs, tableDir, new FSVisitor.StoreFileVisitor() {
       @Override
       public void storeFile(final String region, final String family, final String hfileName)
           throws IOException {
-        hfiles.add(new Path(tableDir, new Path(region, new Path(family, hfileName))));
+        hfiles.add(new Path(new Path(FsLayout.getRegionDir(tableDir, region), family), hfileName));
       }
     });
     return hfiles.toArray(new Path[hfiles.size()]);
   }
 
-  public static String[] listHFileNames(final FileSystem fs, final Path tableDir)
+  public static String[] listHFileNames(final FileSystem fs, HTableDescriptor desc, final Path tableDir)
      throws IOException {
-    Path[] files = listHFiles(fs, tableDir);
+    Path[] files = listHFiles(fs, desc, tableDir);
     String[] names = new String[files.length];
     for (int i = 0; i < files.length; ++i) {
       names[i] = files[i].getName();
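SnapshotTestingUtils now resolves a region directory from its encoded name via FsLayout.getRegionDir() instead of concatenating tableDir/region/family/hfile directly. A sketch of composing a store file path that works under either layout (only the FsLayout call is from this patch; the rest is plain Path arithmetic, and the class name is illustrative):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.fs.layout.FsLayout;

    class StoreFilePathSketch {
      // tableDir/<layout-specific region dir>/<family>/<hfileName>
      static Path storeFilePath(Path tableDir, String encodedRegion,
          String family, String hfileName) {
        Path regionDir = FsLayout.getRegionDir(tableDir, encodedRegion);
        return new Path(new Path(regionDir, family), hfileName);
      }
    }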
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportHumongousSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportHumongousSnapshot.java
new file mode 100644
index 0000000..88f579c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportHumongousSnapshot.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
+import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({VerySlowRegionServerTests.class, MediumTests.class})
+public class TestExportHumongousSnapshot extends TestExportSnapshot {
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+    TestExportSnapshot.setUpBeforeClass();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    try {
+      TestExportSnapshot.tearDownAfterClass();
+    } finally {
+      FsLayout.reset();
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
index 1e16f8f..ef7dfcb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
 import org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
@@ -376,9 +377,11 @@ public class TestExportSnapshot {
         if (storeFile.hasReference()) {
           // Nothing to do here, we have already the reference embedded
         } else {
-          verifyNonEmptyFile(new Path(exportedArchive,
-            new Path(FSUtils.getTableDir(new Path("./"), tableName),
-              new Path(regionInfo.getEncodedName(), new Path(family, hfile)))));
+          Path tableDir = FSUtils.getTableDir(new Path("./"), tableName);
+          HRegionFileSystem hrfs = HRegionFileSystem.create(TEST_UTIL.getConfiguration(), fs, tableDir, regionInfo);
+          Path regionDir = hrfs.getRegionDir();
+          verifyNonEmptyFile(new Path(new Path(exportedArchive,
+            regionDir), new Path(family, hfile)));
         }
       }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestHumongousRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestHumongousRestoreSnapshotHelper.java
new file mode 100644
index 0000000..b1ccd47
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestHumongousRestoreSnapshotHelper.java
@@ -0,0 +1,40 @@
+///**
+// * Licensed to the Apache Software Foundation (ASF) under one
+// * or more contributor license agreements. See the NOTICE file
+// * distributed with this work for additional information
+// * regarding copyright ownership. The ASF licenses this file
+// * to you under the Apache License, Version 2.0 (the
+// * "License"); you may not use this file except in compliance
+// * with the License. You may obtain a copy of the License at
+// *
+// *     http://www.apache.org/licenses/LICENSE-2.0
+// *
+// * Unless required by applicable law or agreed to in writing, software
+// * distributed under the License is distributed on an "AS IS" BASIS,
+// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// * See the License for the specific language governing permissions and
+// * limitations under the License.
+// */
+//package org.apache.hadoop.hbase.snapshot;
+//
+//import org.apache.hadoop.hbase.fs.layout.FsLayout;
+//import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout;
+//import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+//import org.apache.hadoop.hbase.testclassification.SmallTests;
+//import org.junit.AfterClass;
+//import org.junit.BeforeClass;
+//import org.junit.experimental.categories.Category;
+//
+//@Category({RegionServerTests.class, SmallTests.class})
+//public class TestHumongousRestoreSnapshotHelper extends TestRestoreSnapshotHelper {
+//
+//  @BeforeClass
+//  public static void setUpBeforeClass() throws Exception {
+//    FsLayout.setLayoutForTesting(HierarchicalFsLayout.get());
+//  }
+//
+//  @AfterClass
+//  public static void tearDownAfterClass() throws Exception {
+//    FsLayout.reset();
+//  }
+//}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index 3fac2fd..07e218b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -107,7 +107,7 @@ public class TestRestoreSnapshotHelper {
   private void verifyRestore(final Path rootDir, final HTableDescriptor sourceHtd,
       final HTableDescriptor htdClone) throws IOException {
-    String[] files = SnapshotTestingUtils.listHFileNames(fs,
+    String[] files = SnapshotTestingUtils.listHFileNames(fs, htdClone,
       FSUtils.getTableDir(rootDir, htdClone.getTableName()));
     assertEquals(12, files.length);
     for (int i = 0; i < files.length; i += 2) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
index 7b5aed5..d598cd8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
+import org.apache.hadoop.hbase.fs.layout.FsLayout;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
 
@@ -211,10 +213,9 @@ public class HFileArchiveTestingUtil {
    * @return {@link Path} to the archive directory for the given region
    */
   public static Path getRegionArchiveDir(Configuration conf, HRegion region) throws IOException {
-    return HFileArchiveUtil.getRegionArchiveDir(
-      FSUtils.getRootDir(conf),
region.getTableDesc().getTableName(), - region.getRegionInfo().getEncodedName()); + return HFileArchiver.getRegionArchiveDir(FSUtils.getRootDir(conf), + region.getTableDesc().getTableName(), region.getRegionFileSystem() + .getRegionDir()); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java index 142e46e..7aa9761 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestFSVisitor.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.wal.WALSplitter; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.junit.*; @@ -148,7 +149,7 @@ public class TestFSVisitor { for (int r = 0; r < 10; ++r) { String regionName = MD5Hash.getMD5AsHex(Bytes.toBytes(r)); tableRegions.add(regionName); - Path regionDir = new Path(tableDir, regionName); + Path regionDir = FsLayout.getRegionDir(tableDir, regionName); for (int f = 0; f < 3; ++f) { String familyName = "f" + f; tableFamilies.add(familyName); @@ -174,7 +175,7 @@ public class TestFSVisitor { private void createRecoverEdits(final Path tableDir, final Set<String> tableRegions, final Set<String> recoverEdits) throws IOException { for (String region: tableRegions) { - Path regionEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(new Path(tableDir, region)); + Path regionEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(FsLayout.getRegionDir(tableDir, region)); long seqId = System.currentTimeMillis(); for (int i = 0; i < 3; ++i) { String editName = String.format("%019d", seqId + i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java index a285eae..c719676 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java @@ -77,7 +77,6 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.client.HConnection; -import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -133,23 +132,23 @@ import com.google.common.collect.Multimap; public class TestHBaseFsck { static final int POOL_SIZE = 7; private static final Log LOG = LogFactory.getLog(TestHBaseFsck.class); - private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final static Configuration conf = TEST_UTIL.getConfiguration(); - private final static String FAM_STR = "fam"; - private final static byte[] FAM = Bytes.toBytes(FAM_STR); + final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + final static Configuration conf = TEST_UTIL.getConfiguration(); + final static String FAM_STR = "fam"; + final static byte[] FAM = Bytes.toBytes(FAM_STR); private final static int REGION_ONLINE_TIMEOUT = 800; private static RegionStates regionStates; - private static ExecutorService tableExecutorService; - private static
ScheduledThreadPoolExecutor hbfsckExecutorService; - private static ClusterConnection connection; + static ExecutorService tableExecutorService; + static ScheduledThreadPoolExecutor hbfsckExecutorService; + static ClusterConnection connection; private static Admin admin; // for the instance, reset every test run - private Table tbl; - private final static byte[][] SPLITS = new byte[][] { Bytes.toBytes("A"), + Table tbl; + final static byte[][] SPLITS = new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") }; // one row per region. - private final static byte[][] ROWKEYS= new byte[][] { + final static byte[][] ROWKEYS= new byte[][] { Bytes.toBytes("00"), Bytes.toBytes("50"), Bytes.toBytes("A0"), Bytes.toBytes("A5"), Bytes.toBytes("B0"), Bytes.toBytes("B5"), Bytes.toBytes("C0"), Bytes.toBytes("C5") }; @@ -282,7 +281,7 @@ public class TestHBaseFsck { /** * Create a new region in META. */ - private HRegionInfo createRegion(final HTableDescriptor + HRegionInfo createRegion(final HTableDescriptor htd, byte[] startKey, byte[] endKey) throws IOException { Table meta = connection.getTable(TableName.META_TABLE_NAME, tableExecutorService); @@ -370,8 +369,9 @@ public class TestHBaseFsck { LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString()); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); - Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), - hri.getEncodedName()); + Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName()); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri); + Path p = hrfs.getRegionDir(); Path hriPath = new Path(p, HRegionFileSystem.REGION_INFO_FILE); fs.delete(hriPath, true); } @@ -380,8 +380,9 @@ public class TestHBaseFsck { LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString()); Path rootDir = FSUtils.getRootDir(conf); FileSystem fs = rootDir.getFileSystem(conf); - Path p = new Path(FSUtils.getTableDir(rootDir, htd.getTableName()), - hri.getEncodedName()); + Path tableDir = FSUtils.getTableDir(rootDir, htd.getTableName()); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri); + Path p = hrfs.getRegionDir(); HBaseFsck.debugLsr(conf, p); boolean success = fs.delete(p, true); LOG.info("Deleted " + p + " sucessfully? " + success); @@ -441,7 +442,7 @@ public class TestHBaseFsck { } tbl.put(puts); } - + /** * Counts the number of row to verify data loss or non-dataloss. 
*/ @@ -2138,20 +2139,11 @@ public class TestHBaseFsck { * @throws IOException */ Path getFlushedHFile(FileSystem fs, TableName table) throws IOException { - Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table); - Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0); - Path famDir = new Path(regionDir, FAM_STR); - - // keep doing this until we get a legit hfile + Path rootDir = FSUtils.getRootDir(conf); while (true) { - FileStatus[] hfFss = fs.listStatus(famDir); - if (hfFss.length == 0) { - continue; - } - for (FileStatus hfs : hfFss) { - if (!hfs.isDirectory()) { - return hfs.getPath(); - } + Map<String, Path> tableStoreFilePathMap = FSUtils.getTableStoreFilePathMap(null, fs, rootDir, table); + if (!tableStoreFilePathMap.isEmpty()) { + return tableStoreFilePathMap.values().iterator().next(); } } } @@ -2339,9 +2331,12 @@ public class TestHBaseFsck { // Mess it up by creating a fake reference file FileSystem fs = FileSystem.get(conf); Path tableDir= FSUtils.getTableDir(FSUtils.getRootDir(conf), table); - Path regionDir = FSUtils.getRegionDirs(fs, tableDir).get(0); + // Just pick a random region + HRegionInfo hri = MetaTableAccessor.getTableRegions(TEST_UTIL.getConnection(), table).get(0); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri); + Path regionDir = hrfs.getRegionDir(); Path famDir = new Path(regionDir, FAM_STR); - Path fakeReferenceFile = new Path(famDir, "fbce357483ceea.12144538"); + Path fakeReferenceFile = new Path(famDir, "fbce357483ceea.d9ffc3a5cd016ae58e23d7a6cb937949"); fs.create(fakeReferenceFile); HBaseFsck hbck = doFsck(conf, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckHumongousTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckHumongousTable.java new file mode 100644 index 0000000..527b37c --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckHumongousTable.java @@ -0,0 +1,49 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import org.apache.hadoop.hbase.fs.layout.FsLayout; +import org.apache.hadoop.hbase.fs.layout.HierarchicalFsLayout; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MiscTests; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.experimental.categories.Category; + +/** + * This tests HBaseFsck's ability to detect reasons for inconsistent humongous tables.
+ */ +@Category({MiscTests.class, LargeTests.class}) +public class TestHBaseFsckHumongousTable extends TestHBaseFsck { + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + FsLayout.setLayoutForTesting(HierarchicalFsLayout.get()); + TestHBaseFsck.setUpBeforeClass(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception { + try { + TestHBaseFsck.tearDownAfterClass(); + } finally { + FsLayout.reset(); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java index ab14c41..4de2200 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java @@ -23,6 +23,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.backup.HFileArchiver; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Test; @@ -52,7 +54,7 @@ public class TestHFileArchiveUtil { @Test public void testRegionArchiveDir() { Path regionDir = new Path("region"); - assertNotNull(HFileArchiveUtil.getRegionArchiveDir(rootDir, + assertNotNull(HFileArchiver.getRegionArchiveDir(rootDir, TableName.valueOf("table"), regionDir)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java index c847945..706bec3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.coprocessor.CoprocessorHost; import org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver; +import org.apache.hadoop.hbase.fs.layout.FsLayout; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.util.Bytes; @@ -68,6 +69,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestName; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; // imports for things that haven't moved from regionserver.wal yet. 
import org.apache.hadoop.hbase.regionserver.wal.HLogKey; import org.apache.hadoop.hbase.regionserver.wal.SequenceFileLogReader; @@ -176,8 +178,9 @@ public class TestWALFactory { for(int i = 0; i < howmany; i++) { infos[i] = new HRegionInfo(tableName, Bytes.toBytes("" + i), Bytes.toBytes("" + (i+1)), false); - fs.mkdirs(new Path(tabledir, infos[i].getEncodedName())); - LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString()); + HRegionFileSystem hrfs = HRegionFileSystem.createRegionOnFileSystem( + conf, fs, tabledir, infos[i]); + LOG.info("allo " + hrfs.getRegionDir()); } HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("column")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java index e263cdb..f021a13 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java @@ -62,9 +62,9 @@ import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableDescriptor; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; -import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.wal.WAL.Entry; import org.apache.hadoop.hbase.wal.WAL.Reader; import org.apache.hadoop.hbase.wal.WALProvider.Writer; @@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.wal.WALSplitter.CorruptedLogFileException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -92,6 +93,7 @@ import org.mockito.stubbing.Answer; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; +import org.apache.hadoop.hbase.regionserver.HRegionFileSystem; // imports for things that haven't moved from regionserver.wal yet.
import org.apache.hadoop.hbase.regionserver.wal.WALEdit; import org.apache.hadoop.hbase.regionserver.wal.InstrumentedLogWriter; @@ -396,15 +404,14 @@ public class TestWALSplit { public void testOldRecoveredEditsFileSidelined() throws IOException { byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(); Path tdir = FSUtils.getTableDir(HBASEDIR, TableName.META_TABLE_NAME); - Path regiondir = new Path(tdir, - HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); - fs.mkdirs(regiondir); + HRegionFileSystem metaHrfs = HRegionFileSystem.createRegionOnFileSystem( + conf, fs, tdir, HRegionInfo.FIRST_META_REGIONINFO); long now = System.currentTimeMillis(); Entry entry = new Entry(new WALKey(encoded, TableName.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), new WALEdit()); - Path parent = WALSplitter.getRegionDirRecoveredEditsDir(regiondir); + Path parent = WALSplitter.getRegionDirRecoveredEditsDir(metaHrfs.getRegionDir()); assertEquals(parent.getName(), HConstants.RECOVERED_EDITS_DIR); fs.createNewFile(parent); // create a recovered.edits file @@ -688,7 +695,9 @@ public class TestWALSplit { useDifferentDFSClient(); String region = "break"; - Path regiondir = new Path(TABLEDIR, region); + HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(TABLE_NAME, region); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, TABLEDIR, hri); + Path regiondir = hrfs.getRegionDir(); fs.mkdirs(regiondir); InstrumentedLogWriter.activateFailure = false; @@ -711,13 +720,15 @@ public class TestWALSplit { @Test (timeout=300000) public void testSplitDeletedRegion() throws IOException { REGIONS.clear(); - String region = "region_that_splits"; + String region = "d9ffc3a5cd016-region_that_splits"; REGIONS.add(region); generateWALs(1); useDifferentDFSClient(); - Path regiondir = new Path(TABLEDIR, region); + HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(TABLE_NAME, region); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, TABLEDIR, hri); + Path regiondir = hrfs.getRegionDir(); fs.delete(regiondir, true); WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals); assertFalse(fs.exists(regiondir)); @@ -1008,14 +1019,16 @@ public class TestWALSplit { @Test (timeout=300000) public void testSplitLogFileDeletedRegionDir() throws IOException { LOG.info("testSplitLogFileDeletedRegionDir"); - final String REGION = "region__1"; + final String REGION = "testSplitLogFileDeletedRegionDir"; REGIONS.clear(); REGIONS.add(REGION); generateWALs(1, 10, -1); useDifferentDFSClient(); - Path regiondir = new Path(TABLEDIR, REGION); + HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(TABLE_NAME, REGION); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, TABLEDIR, hri); + Path regiondir = hrfs.getRegionDir(); LOG.info("Region directory is" + regiondir); fs.delete(regiondir, true); WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals); @@ -1069,7 +1082,9 @@ public class TestWALSplit { LOG.info("testConcurrentSplitLogAndReplayRecoverEdit"); // Generate wals for our destination region String regionName = "r0"; - final Path regiondir = new Path(TABLEDIR, regionName); + HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(TABLE_NAME, regionName); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, TABLEDIR, hri); + final Path regiondir = hrfs.getRegionDir(); REGIONS.clear(); REGIONS.add(regionName); generateWALs(-1); @@ -1123,8 +1138,11 @@ public class TestWALSplit { private void makeRegionDirs(List<String> regions) throws IOException { for
(String region : regions) { + HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(TABLE_NAME, region); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, TABLEDIR, hri); + Path regiondir = hrfs.getRegionDir(); LOG.debug("Creating dir for region " + region); - fs.mkdirs(new Path(TABLEDIR, region)); + fs.mkdirs(regiondir); } } @@ -1161,9 +1179,10 @@ public class TestWALSplit { private Path[] getLogForRegion(Path rootdir, TableName table, String region) throws IOException { Path tdir = FSUtils.getTableDir(rootdir, table); - @SuppressWarnings("deprecation") - Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(HRegion.getRegionDir(tdir, - Bytes.toString(region.getBytes()))); + HRegionInfo hri = HRegionInfo.makeTestInfoWithEncodedName(table, region); + HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tdir, hri); + Path regiondir = hrfs.getRegionDir(); + Path editsdir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir); FileStatus[] files = fs.listStatus(editsdir, new PathFilter() { @Override public boolean accept(Path p) { -- 1.7.10.2 (Apple Git-33)
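Note for reviewers: every test hunk above applies the same recurring migration, replacing hand-built region paths (new Path(tableDir, encodedName), which hard-codes the flat layout) with a lookup through the layout abstraction, so hierarchical "humongous" tables resolve to their bucketed directories. The sketch below is illustrative only and is not part of the patch; it uses only calls that appear in the hunks above (HRegionFileSystem.create, HRegionFileSystem#getRegionDir, FsLayout.getRegionDir), the class name RegionDirResolutionSketch is hypothetical, and the checked IOException on the factory method is assumed from its call sites in the diff.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.fs.layout.FsLayout;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;

public class RegionDirResolutionSketch {

  // Old style, removed throughout this patch: assumes a region directory
  // always sits directly under the table directory (flat layout only).
  static Path regionDirFlat(Path tableDir, HRegionInfo hri) {
    return new Path(tableDir, hri.getEncodedName());
  }

  // New style, added throughout this patch: the region filesystem consults
  // the configured layout (standard or hierarchical) to place the region.
  static Path regionDirViaLayout(Configuration conf, FileSystem fs,
      Path tableDir, HRegionInfo hri) throws IOException {
    HRegionFileSystem hrfs = HRegionFileSystem.create(conf, fs, tableDir, hri);
    return hrfs.getRegionDir();
  }

  // When only the encoded-name string is at hand (as in TestFSVisitor and
  // the WAL-split tests above), the static FsLayout helper is used instead.
  static Path regionDirByName(Path tableDir, String encodedName) {
    return FsLayout.getRegionDir(tableDir, encodedName);
  }
}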