From 8352b92fa7c9890ab59345bcdd48e474348e303d Mon Sep 17 00:00:00 2001
From: Umesh Agashe
Date: Tue, 18 Oct 2016 14:46:16 -0700
Subject: [PATCH] HBASE-16862 Removed filesystem/ directory layout references
 from the code in master/procedure dir

Added required APIs in MasterStorage/ RegionStorage.
---
 .../org/apache/hadoop/hbase/fs/MasterStorage.java  | 49 +++++++++++-
 .../hadoop/hbase/fs/legacy/LegacyLayout.java       |  4 +
 .../hbase/fs/legacy/LegacyMasterStorage.java       | 43 ++++++++++-
 .../hadoop/hbase/master/AssignmentManager.java     |  1 -
 .../master/procedure/CloneSnapshotProcedure.java   | 71 +++++++----------
 .../master/procedure/CreateTableProcedure.java     | 72 ++++++-----------
 .../master/procedure/DeleteTableProcedure.java     | 89 ++--------------------
 .../master/procedure/DisableTableProcedure.java    |  2 -
 .../master/procedure/RestoreSnapshotProcedure.java | 33 ++------
 .../master/procedure/TruncateTableProcedure.java   |  5 +-
 .../apache/hadoop/hbase/regionserver/HRegion.java  | 12 ++-
 .../hbase/snapshot/RestoreSnapshotHelper.java      | 42 ++++------
 .../hadoop/hbase/snapshot/SnapshotManifest.java    |  8 ++
 .../hadoop/hbase/util/ModifyRegionUtils.java       | 26 +++---
 .../hbase/snapshot/TestRestoreSnapshotHelper.java  |  6 +-
 15 files changed, 199 insertions(+), 264 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
index 7adfe70..87c14bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
@@ -116,18 +116,42 @@ public abstract class MasterStorage {
   // ==========================================================================
   // PUBLIC Methods - Table related
   // ==========================================================================
+
+  /**
+   * Deletes the table from master storage (from the DATA context, without archiving it first)
+   * @param tableName name of the table to delete
+   * @throws IOException
+   */
   public void deleteTable(TableName tableName) throws IOException {
-    deleteTable(StorageContext.DATA, tableName);
+    deleteTable(StorageContext.DATA, tableName, false);
   }
 
+  /**
+   * Deletes the table from master storage (from the DATA context)
+   * @param tableName name of the table to delete
+   * @param archive if true, all regions are archived before deletion
+   * @throws IOException
+   */
+  public void deleteTable(TableName tableName, boolean archive) throws IOException {
+    deleteTable(StorageContext.DATA, tableName, archive);
+  }
+
+  /**
+   * Deletes the table from master storage
+   * @param ctx storage context of the table
+   * @param tableName name of the table to delete
+   * @param archive if true, all regions of the table are archived before deletion
+   * @throws IOException
+   */
+  public abstract void deleteTable(StorageContext ctx, TableName tableName, boolean archive)
+    throws IOException;
+
   public Collection getTables(String namespace) throws IOException {
     return getTables(StorageContext.DATA, namespace);
   }
 
-  public abstract void deleteTable(StorageContext ctx, TableName tableName) throws IOException;
-
   public abstract Collection getTables(StorageContext ctx, String namespace)
-    throws IOException;
+      throws IOException;
 
   public Collection getTables() throws IOException {
     ArrayList tables = new ArrayList();
@@ -137,6 +161,23 @@ public abstract class MasterStorage {
     return tables;
   }
 
+  /**
+   * Archives the specified table and all its regions
+   * @param tableName name of the table to archive
+   * @throws IOException
+   */
+  public void archiveTable(TableName tableName) throws IOException {
+    archiveTable(StorageContext.DATA, tableName);
+  }
+
+  /**
+   * Archives the specified table and all its regions
+   * @param ctx storage context of the table
+   * @param tableName name of the table to archive
+   * @throws IOException
+   */
+  public abstract void archiveTable(StorageContext ctx, TableName tableName) throws IOException;
+
   // ==========================================================================
   // PUBLIC Methods - Table Region related
   // ==========================================================================
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java
index 53f4fb0..34dcc1d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyLayout.java
@@ -113,6 +113,10 @@ public final class LegacyLayout {
     return new Path(rootDir, MobConstants.MOB_DIR_NAME);
   }
 
+  public static Path getMobTableDir(Path rootDir, TableName table) {
+    return new Path(getMobDir(rootDir), table.getQualifierAsString());
+  }
+
   public static Path getBulkDir(Path rootDir) {
     return new Path(rootDir, HConstants.BULKLOAD_STAGING_DIR_NAME);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
index db6304f..3e481dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/LegacyMasterStorage.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.StorageContext;
@@ -162,11 +163,27 @@ public class LegacyMasterStorage extends MasterStorage {
   // PUBLIC Methods - Table related
   // ==========================================================================
   @Override
-  public void deleteTable(StorageContext ctx, TableName tableName) throws IOException {
+  public void deleteTable(StorageContext ctx, TableName tableName, boolean archive)
+      throws IOException {
+    if (archive) {
+      archiveTable(ctx, tableName);
+    }
+
     Path tableDir = getTableDir(ctx, tableName);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Deleting table '" + tableName + "' from '" + tableDir + "'.");
+    }
     if (!FSUtils.deleteDirectory(getFileSystem(), tableDir)) {
       throw new IOException("Failed delete of " + tableName);
     }
+
+    Path mobTableDir = LegacyLayout.getMobTableDir(getRootContainer().path, tableName);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Deleting MOB data '" + mobTableDir + "'.");
+    }
+    if (!FSUtils.deleteDirectory(getFileSystem(), mobTableDir)) {
+      throw new IOException("Failed to delete MOB data of table " + tableName);
+    }
   }
 
   @Override
@@ -183,6 +200,27 @@ public class LegacyMasterStorage extends MasterStorage {
     return tables;
   }
 
+  @Override
+  public void archiveTable(StorageContext ctx, TableName tableName) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Archiving table '" + tableName + "' from storage");
+    }
+
+    // archive all regions
+    for (HRegionInfo hri : getRegions(ctx, tableName)) {
+      archiveRegion(hri);
+    }
+
+    // archive MOB data
+    Path mobTableDir = LegacyLayout.getMobTableDir(getRootContainer().path, tableName);
+    Path mobRegionDir = new Path(mobTableDir,
+        MobUtils.getMobRegionInfo(tableName).getEncodedName());
+    if (getFileSystem().exists(mobRegionDir)) {
+      HFileArchiver.archiveRegion(getFileSystem(), getRootContainer().path, mobTableDir,
+          mobRegionDir);
+    }
+  }
+
   // ==========================================================================
   // PUBLIC Methods - Table Regions related
   // ==========================================================================
@@ -216,6 +254,9 @@ public class LegacyMasterStorage extends MasterStorage {
    */
   @Override
   public void archiveRegion(HRegionInfo regionInfo) throws IOException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Archiving region '" + regionInfo.getRegionNameAsString() + "' from storage.");
+    }
     HFileArchiver.archiveRegion(getConfiguration(), getFileSystem(), regionInfo);
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 54f8391..ab058da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -61,7 +61,6 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.RegionStateListener;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index f3cabbd..e8e75c8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -31,8 +31,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -40,26 +38,21 @@ import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.fs.StorageContext;
 import org.apache.hadoop.hbase.fs.MasterStorage;
-import org.apache.hadoop.hbase.fs.StorageIdentifier;
-import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MetricsSnapshot;
-import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateHdfsRegions;
+import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateStorageRegions;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CloneSnapshotState;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 
 import com.google.common.base.Preconditions;
@@ -128,7 +121,7 @@ public class CloneSnapshotProcedure
           setNextState(CloneSnapshotState.CLONE_SNAPSHOT_WRITE_FS_LAYOUT);
           break;
         case CLONE_SNAPSHOT_WRITE_FS_LAYOUT:
-          newRegions = createFilesystemLayout(env, hTableDescriptor, newRegions);
+          newRegions = createStorageLayout(env, hTableDescriptor, newRegions);
           setNextState(CloneSnapshotState.CLONE_SNAPSHOT_ADD_TO_META);
           break;
        case CLONE_SNAPSHOT_ADD_TO_META:
@@ -310,11 +303,7 @@ public class CloneSnapshotProcedure
 
     // Check and update namespace quota
     final MasterStorage ms = env.getMasterServices().getMasterStorage();
-    SnapshotManifest manifest = SnapshotManifest.open(
-      env.getMasterConfiguration(),
-      ms.getFileSystem(),
-      SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, ((LegacyPathIdentifier) ms
-          .getRootContainer()).path), snapshot);
+    SnapshotManifest manifest = SnapshotManifest.open(env.getMasterConfiguration(), snapshot);
     ProcedureSyncWait.getMasterQuotaManager(env)
       .checkNamespaceTableAndRegionQuota(getTableName(), manifest.getRegionManifestsMap().size());
 
@@ -343,24 +332,21 @@
   }
 
   /**
-   * Create regions in file system.
+   * Create regions on storage.
    * @param env MasterProcedureEnv
    * @throws IOException
    */
-  private List<HRegionInfo> createFilesystemLayout(
+  private List<HRegionInfo> createStorageLayout(
     final MasterProcedureEnv env,
     final HTableDescriptor hTableDescriptor,
     final List<HRegionInfo> newRegions) throws IOException {
-    return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
+    return createTableOnStorage(env, hTableDescriptor, newRegions, new CreateStorageRegions() {
       @Override
-      public List<HRegionInfo> createHdfsRegions(
-        final MasterProcedureEnv env,
-        final Path tableRootDir, final TableName tableName,
-        final List<HRegionInfo> newRegions) throws IOException {
-
-        final MasterStorage ms = env.getMasterServices().getMasterStorage();
-        final FileSystem fs = ms.getFileSystem();
-        final StorageIdentifier rootContainer = ms.getRootContainer();
+      public List<HRegionInfo> createRegionsOnStorage(
+          final MasterProcedureEnv env,
+          final TableName tableName,
+          final List<HRegionInfo> newRegions) throws IOException {
+        final Configuration conf = env.getMasterConfiguration();
         final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
@@ -368,12 +354,11 @@
 
         try {
           // 1. Execute the on-disk Clone
-          Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot,
-              ((LegacyPathIdentifier) rootContainer).path);
-          SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, snapshot);
-          RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
-            conf, fs, manifest, hTableDescriptor, tableRootDir, monitorException, monitorStatus);
-          RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
+          SnapshotManifest manifest = SnapshotManifest.open(conf, snapshot);
+          RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(conf, manifest,
+            hTableDescriptor, monitorException, monitorStatus);
+          RestoreSnapshotHelper.RestoreMetaChanges metaChanges =
+            restoreHelper.restoreStorageRegions();
 
           // Clone operation should not have stuff to restore or remove
           Preconditions.checkArgument(
@@ -405,30 +390,28 @@
   }
 
   /**
-   * Create region layout in file system.
+   * Create region layout on storage.
    * @param env MasterProcedureEnv
    * @throws IOException
    */
-  private List<HRegionInfo> createFsLayout(
+  private List<HRegionInfo> createTableOnStorage(
     final MasterProcedureEnv env,
     final HTableDescriptor hTableDescriptor,
     List<HRegionInfo> newRegions,
-    final CreateHdfsRegions hdfsRegionHandler) throws IOException {
+    final CreateStorageRegions storageRegionHandler) throws IOException {
     final MasterStorage ms = env.getMasterServices().getMasterStorage();
-    final Path tempdir = ((LegacyPathIdentifier)ms.getTempContainer()).path;
 
-    // 1. Create Table Descriptor
+    // 1. Delete existing artifacts (dir, files etc) for the table
+    ms.deleteTable(hTableDescriptor.getTableName());
+
+    // 2. Create Table Descriptor
     // using a copy of descriptor, table will be created enabling first
     HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
-    final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
-    ms.createTableDescriptor(StorageContext.TEMP, underConstruction, false);
-
-    // 2. Create Regions
-    newRegions = hdfsRegionHandler.createHdfsRegions(
-      env, tempdir, hTableDescriptor.getTableName(), newRegions);
+    ms.createTableDescriptor(underConstruction, true);
 
-    // 3. Move Table temp directory to the hbase root location
-    CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
+    // 3. Create Regions
+    newRegions = storageRegionHandler.createRegionsOnStorage(env, hTableDescriptor.getTableName(),
+        newRegions);
 
     return newRegions;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index e44192d..dc4b231 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -27,8 +27,6 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -37,16 +35,14 @@ import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.TableState;
-import org.apache.hadoop.hbase.fs.StorageContext;
 import org.apache.hadoop.hbase.fs.MasterStorage;
-import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.CreateTableState;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 
@@ -100,7 +96,7 @@ public class CreateTableProcedure
           setNextState(CreateTableState.CREATE_TABLE_WRITE_FS_LAYOUT);
           break;
         case CREATE_TABLE_WRITE_FS_LAYOUT:
-          newRegions = createFsLayout(env, hTableDescriptor, newRegions);
+          newRegions = createTableOnStorage(env, hTableDescriptor, newRegions);
           setNextState(CreateTableState.CREATE_TABLE_ADD_TO_META);
           break;
         case CREATE_TABLE_ADD_TO_META:
@@ -267,69 +263,47 @@ public class CreateTableProcedure
     }
   }
 
-  protected interface CreateHdfsRegions {
-    List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env,
-      final Path tableRootDir, final TableName tableName,
-      final List<HRegionInfo> newRegions) throws IOException;
+  protected interface CreateStorageRegions {
+    List<HRegionInfo> createRegionsOnStorage(final MasterProcedureEnv env,
+        final TableName tableName, final List<HRegionInfo> newRegions) throws IOException;
   }
 
-  protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
+  protected static List<HRegionInfo> createTableOnStorage(final MasterProcedureEnv env,
     final HTableDescriptor hTableDescriptor, final List<HRegionInfo> newRegions)
     throws IOException {
-    return createFsLayout(env, hTableDescriptor, newRegions, new CreateHdfsRegions() {
+    return createTableOnStorage(env, hTableDescriptor, newRegions, new CreateStorageRegions() {
       @Override
-      public List<HRegionInfo> createHdfsRegions(final MasterProcedureEnv env,
-        final Path tableRootDir, final TableName tableName,
-        final List<HRegionInfo> newRegions) throws IOException {
+      public List<HRegionInfo> createRegionsOnStorage(final MasterProcedureEnv env,
+          final TableName tableName, final List<HRegionInfo> newRegions) throws IOException {
         HRegionInfo[] regions = newRegions != null ?
          newRegions.toArray(new HRegionInfo[newRegions.size()]) : null;
-        //TODO this should be RegionStorage
-        return ModifyRegionUtils.createRegions(env.getMasterConfiguration(),
-            tableRootDir, hTableDescriptor, regions, null);
+        return ModifyRegionUtils.createRegions(env.getMasterConfiguration(), hTableDescriptor,
+            regions, null);
       }
     });
   }
 
-  protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
+  protected static List<HRegionInfo> createTableOnStorage(final MasterProcedureEnv env,
     final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions,
-    final CreateHdfsRegions hdfsRegionHandler) throws IOException {
-    final MasterStorage ms = env.getMasterServices().getMasterStorage();
-    final Path tempdir = ((LegacyPathIdentifier) ms.getTempContainer()).path;
+    final CreateStorageRegions storageRegionHandler) throws IOException {
+    final MasterStorage<? extends StorageIdentifier> ms =
+        env.getMasterServices().getMasterStorage();
 
-    // 1. Create Table Descriptor
+    // 1. Delete existing artifacts (dir, files etc) for the table
+    ms.deleteTable(hTableDescriptor.getTableName());
+
+    // 2. Create Table Descriptor
     // using a copy of descriptor, table will be created enabling first
     HTableDescriptor underConstruction = new HTableDescriptor(hTableDescriptor);
-    final Path tempTableDir = FSUtils.getTableDir(tempdir, hTableDescriptor.getTableName());
-    ms.createTableDescriptor(StorageContext.TEMP, underConstruction, false);
-
-    // 2. Create Regions
-    newRegions = hdfsRegionHandler.createHdfsRegions(env, tempdir,
-      hTableDescriptor.getTableName(), newRegions);
+    ms.createTableDescriptor(underConstruction, true);
 
-    // 3. Move Table temp directory to the hbase root location
-    moveTempDirectoryToHBaseRoot(env, hTableDescriptor, tempTableDir);
+    // 3. Create Regions
+    newRegions = storageRegionHandler.createRegionsOnStorage(env, hTableDescriptor.getTableName(),
+        newRegions);
 
     return newRegions;
   }
 
-  protected static void moveTempDirectoryToHBaseRoot(
-    final MasterProcedureEnv env,
-    final HTableDescriptor hTableDescriptor,
-    final Path tempTableDir) throws IOException {
-    final MasterStorage ms = env.getMasterServices().getMasterStorage();
-    final Path tableDir = FSUtils.getTableDir(((LegacyPathIdentifier)ms.getRootContainer()).path,
-        hTableDescriptor.getTableName());
-    FileSystem fs = ms.getFileSystem();
-
-    if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
-      throw new IOException("Couldn't delete " + tableDir);
-    }
-    if (!fs.rename(tempTableDir, tableDir)) {
-      throw new IOException("Unable to move table from temp=" + tempTableDir +
-        " to hbase root=" + tableDir);
-    }
-  }
-
   protected static List<HRegionInfo> addTableToMeta(final MasterProcedureEnv env,
     final HTableDescriptor hTableDescriptor,
     final List<HRegionInfo> regions) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
index d471eda..ffeeb98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteTableProcedure.java
@@ -26,15 +26,11 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
@@ -44,17 +40,12 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.exceptions.HBaseException;
-import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
-import org.apache.hadoop.hbase.mob.MobConstants;
-import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DeleteTableState;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 @InterfaceAudience.Private
 public class DeleteTableProcedure
@@ -113,8 +104,8 @@ public class DeleteTableProcedure
           setNextState(DeleteTableState.DELETE_TABLE_CLEAR_FS_LAYOUT);
           break;
         case DELETE_TABLE_CLEAR_FS_LAYOUT:
-          LOG.debug("delete '" + getTableName() + "' from filesystem");
-          DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
+          LOG.debug("delete '" + getTableName() + "' from storage");
+          DeleteTableProcedure.deleteFromStorage(env, getTableName(), regions, true);
           setNextState(DeleteTableState.DELETE_TABLE_UPDATE_DESC_CACHE);
           regions = null;
           break;
@@ -259,79 +250,11 @@ public class DeleteTableProcedure
     }
   }
 
-  protected static void deleteFromFs(final MasterProcedureEnv env,
-    final TableName tableName, final List<HRegionInfo> regions,
-    final boolean archive) throws IOException {
+  protected static void deleteFromStorage(final MasterProcedureEnv env,
+      final TableName tableName, final List<HRegionInfo> regions, final boolean archive)
+      throws IOException {
     final MasterStorage ms = env.getMasterServices().getMasterStorage();
-    final FileSystem fs = ms.getFileSystem();
-    final Path tempdir = ((LegacyPathIdentifier)ms.getTempContainer()).path;
-
-    final Path tableDir = FSUtils.getTableDir(((LegacyPathIdentifier)ms.getRootContainer()).path,
-        tableName);
-    final Path tempTableDir = FSUtils.getTableDir(tempdir, tableName);
-
-    if (fs.exists(tableDir)) {
-      // Ensure temp exists
-      if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
-        throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
-      }
-
-      // Ensure parent exists
-      if (!fs.exists(tempTableDir.getParent()) && !fs.mkdirs(tempTableDir.getParent())) {
-        throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
-      }
-
-      // Move the table in /hbase/.tmp
-      if (!fs.rename(tableDir, tempTableDir)) {
-        if (fs.exists(tempTableDir)) {
-          // TODO
-          // what's in this dir? something old? probably something manual from the user...
-          // let's get rid of this stuff...
-          FileStatus[] files = fs.listStatus(tempdir);
-          if (files != null && files.length > 0) {
-            for (int i = 0; i < files.length; ++i) {
-              if (!files[i].isDir()) continue;
-              HFileArchiver.archiveRegion(fs, ((LegacyPathIdentifier) ms.getRootContainer()).path,
-                tempTableDir, files[i].getPath());
-            }
-          }
-          fs.delete(tempdir, true);
-        }
-        throw new IOException("Unable to move '" + tableDir + "' to temp '" + tempTableDir + "'");
-      }
-    }
-
-    // Archive regions from FS (temp directory)
-    if (archive) {
-      for (HRegionInfo hri : regions) {
-        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
-        HFileArchiver.archiveRegion(fs, ((LegacyPathIdentifier) ms.getRootContainer()).path,
-          tempTableDir, HRegion.getRegionDir(tempTableDir, hri.getEncodedName()));
-      }
-      LOG.debug("Table '" + tableName + "' archived!");
-    }
-
-    // Archive mob data
-    Path mobTableDir = FSUtils.getTableDir(new Path(((LegacyPathIdentifier) ms.getRootContainer())
-        .path, MobConstants.MOB_DIR_NAME), tableName);
-    Path regionDir =
-      new Path(mobTableDir, MobUtils.getMobRegionInfo(tableName).getEncodedName());
-    if (fs.exists(regionDir)) {
-      HFileArchiver.archiveRegion(fs, ((LegacyPathIdentifier) ms.getRootContainer()).path,
-        mobTableDir, regionDir);
-    }
-
-    // Delete table directory from FS (temp directory)
-    if (!fs.delete(tempTableDir, true) && fs.exists(tempTableDir)) {
-      throw new IOException("Couldn't delete " + tempTableDir);
-    }
-
-    // Delete the table directory where the mob files are saved
-    if (mobTableDir != null && fs.exists(mobTableDir)) {
-      if (!fs.delete(mobTableDir, true)) {
-        throw new IOException("Couldn't delete mob dir " + mobTableDir);
-      }
-    }
+    ms.deleteTable(tableName, archive);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
index 2e53e6f..7087096 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DisableTableProcedure.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.security.PrivilegedExceptionAction;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 
@@ -45,7 +44,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.DisableTableState;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.htrace.Trace;
 
 @InterfaceAudience.Private
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
index 9c373bd..fdfa174 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RestoreSnapshotProcedure.java
@@ -30,8 +30,6 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -42,7 +40,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.fs.MasterStorage;
-import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.master.MetricsSnapshot;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
@@ -54,7 +52,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProcedureProtos.RestoreS
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotHelper;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.util.Pair;
 
@@ -334,12 +331,7 @@ public class RestoreSnapshotProcedure
 
     if (!getTableName().isSystemTable()) {
       // Table already exist. Check and update the region quota for this table namespace.
-      final MasterStorage ms = env.getMasterServices().getMasterStorage();
-      SnapshotManifest manifest = SnapshotManifest.open(
-        env.getMasterConfiguration(),
-        ms.getFileSystem(),
-        SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, ((LegacyPathIdentifier) ms
-            .getRootContainer()).path), snapshot);
+      SnapshotManifest manifest = SnapshotManifest.open(env.getMasterConfiguration(), snapshot);
       int snapshotRegionCount = manifest.getRegionManifestsMap().size();
       int tableRegionCount =
           ProcedureSyncWait.getMasterQuotaManager(env).getRegionCountOfTable(tableName);
@@ -366,26 +358,17 @@ public class RestoreSnapshotProcedure
    * @throws IOException
    **/
   private void restoreSnapshot(final MasterProcedureEnv env) throws IOException {
-    MasterStorage fileSystemManager = env.getMasterServices().getMasterStorage();
-    FileSystem fs = fileSystemManager.getFileSystem();
-    Path rootDir = ((LegacyPathIdentifier) fileSystemManager.getRootContainer()).path;
     final ForeignExceptionDispatcher monitorException = new ForeignExceptionDispatcher();
 
     LOG.info("Starting restore snapshot=" + ClientSnapshotDescriptionUtils.toString(snapshot));
     try {
-      Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshot, rootDir);
-      SnapshotManifest manifest = SnapshotManifest.open(
-        env.getMasterServices().getConfiguration(), fs, snapshotDir, snapshot);
+      SnapshotManifest manifest = SnapshotManifest.open(env.getMasterServices().getConfiguration(),
+        snapshot);
       RestoreSnapshotHelper restoreHelper = new RestoreSnapshotHelper(
-        env.getMasterServices().getConfiguration(),
-        fs,
-        manifest,
-        modifiedHTableDescriptor,
-        rootDir,
-        monitorException,
-        getMonitorStatus());
-
-      RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreHdfsRegions();
+        env.getMasterServices().getConfiguration(), manifest, modifiedHTableDescriptor,
+        monitorException, getMonitorStatus());
+
+      RestoreSnapshotHelper.RestoreMetaChanges metaChanges = restoreHelper.restoreStorageRegions();
       regionsToRestore = metaChanges.getRegionsToRestore();
       regionsToRemove = metaChanges.getRegionsToRemove();
       regionsToAdd = metaChanges.getRegionsToAdd();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
index 95c3192..f87c8a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TruncateTableProcedure.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.InputStream;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -103,7 +102,7 @@ public class TruncateTableProcedure
           setNextState(TruncateTableState.TRUNCATE_TABLE_CLEAR_FS_LAYOUT);
           break;
         case TRUNCATE_TABLE_CLEAR_FS_LAYOUT:
-          DeleteTableProcedure.deleteFromFs(env, getTableName(), regions, true);
+          DeleteTableProcedure.deleteFromStorage(env, getTableName(), regions, true);
           if (!preserveSplits) {
             // if we are not preserving splits, generate a new single region
             regions = Arrays.asList(ModifyRegionUtils.createHRegionInfos(hTableDescriptor, null));
@@ -113,7 +112,7 @@ public class TruncateTableProcedure
           setNextState(TruncateTableState.TRUNCATE_TABLE_CREATE_FS_LAYOUT);
           break;
         case TRUNCATE_TABLE_CREATE_FS_LAYOUT:
-          regions = CreateTableProcedure.createFsLayout(env, hTableDescriptor, regions);
+          regions = CreateTableProcedure.createTableOnStorage(env, hTableDescriptor, regions);
           CreateTableProcedure.updateTableDescCache(env, getTableName());
           setNextState(TruncateTableState.TRUNCATE_TABLE_ADD_TO_META);
           break;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6d46cba..d0a4c94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1744,7 +1744,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return fs.getFileSystem();
   }
 
-  /** @return the {@link HRegionStorage} used by this region */
+  /** @return the {@link RegionStorage} used by this region */
   public RegionStorage getRegionStorage() {
     return this.fs;
   }
@@ -6394,12 +6394,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return createHRegion(conf, hTableDescriptor, info, wal, true);
   }
 
-  /**
-   * TODO remove after refactoring ModifyRegionUtils to use a RegionStorage impl instead of specifying a different root dir manually.
-   */
-  public static HRegion createHRegion(final Configuration conf, final Path rootDir, final HTableDescriptor htd, final HRegionInfo info) throws IOException {
-    RegionStorage rfs = RegionStorage.open(conf, rootDir.getFileSystem(conf), new LegacyPathIdentifier(rootDir), info, true);
-    return HRegion.newHRegion(rfs, htd, null, null);
+  public static HRegion createHRegion(final Configuration conf, final HTableDescriptor htd,
+      final HRegionInfo info) throws IOException {
+    RegionStorage rs = RegionStorage.open(conf, info, true);
+    return HRegion.newHRegion(rs, htd, null, null);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 81baa82..2d9d3d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -125,40 +125,28 @@ public class RestoreSnapshotHelper {
   private final TableName snapshotTable;
 
   private final HTableDescriptor tableDesc;
-  private final Path rootDir;
   private final Path tableDir;
 
   private final Configuration conf;
   private final FileSystem fs;
   private final boolean createBackRefs;
 
-  public RestoreSnapshotHelper(final Configuration conf,
-      final FileSystem fs,
-      final SnapshotManifest manifest,
-      final HTableDescriptor tableDescriptor,
-      final Path rootDir,
-      final ForeignExceptionDispatcher monitor,
-      final MonitoredTask status) {
-    this(conf, fs, manifest, tableDescriptor, rootDir, monitor, status, true);
+  public RestoreSnapshotHelper(final Configuration conf, final SnapshotManifest manifest,
+      final HTableDescriptor tableDescriptor, final ForeignExceptionDispatcher monitor,
+      final MonitoredTask status) throws IOException {
+    this(conf, manifest, tableDescriptor, monitor, status, true);
   }
 
-  public RestoreSnapshotHelper(final Configuration conf,
-      final FileSystem fs,
-      final SnapshotManifest manifest,
-      final HTableDescriptor tableDescriptor,
-      final Path rootDir,
-      final ForeignExceptionDispatcher monitor,
-      final MonitoredTask status,
-      final boolean createBackRefs)
-  {
-    this.fs = fs;
+  public RestoreSnapshotHelper(final Configuration conf, final SnapshotManifest manifest,
+      final HTableDescriptor tableDescriptor, final ForeignExceptionDispatcher monitor,
+      final MonitoredTask status, final boolean createBackRefs) throws IOException {
+    this.fs = FSUtils.getCurrentFileSystem(conf);
     this.conf = conf;
     this.snapshotManifest = manifest;
     this.snapshotDesc = manifest.getSnapshotDescription();
     this.snapshotTable = TableName.valueOf(snapshotDesc.getTable());
     this.tableDesc = tableDescriptor;
-    this.rootDir = rootDir;
-    this.tableDir = FSUtils.getTableDir(rootDir, tableDesc.getTableName());
+    this.tableDir = FSUtils.getTableDir(FSUtils.getRootDir(conf), tableDesc.getTableName());
    this.monitor = monitor;
     this.status = status;
     this.createBackRefs = createBackRefs;
@@ -168,7 +156,7 @@ public class RestoreSnapshotHelper {
    * Restore the on-disk table to a specified snapshot state.
    * @return the set of regions touched by the restore operation
    */
-  public RestoreMetaChanges restoreHdfsRegions() throws IOException {
+  public RestoreMetaChanges restoreStorageRegions() throws IOException {
     ThreadPoolExecutor exec = SnapshotManifest.createExecutor(conf, "RestoreSnapshot");
     try {
       return restoreHdfsRegions(exec);
@@ -588,8 +576,8 @@ public class RestoreSnapshotHelper {
     }
 
     // create the regions on disk
-    ModifyRegionUtils.createRegions(exec, conf, rootDir,
-      tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
+    ModifyRegionUtils.createRegions(exec, conf,
+      tableDesc, clonedRegionsInfo, new ModifyRegionUtils.RegionFillTask() {
         @Override
         public void fillRegion(final HRegion region) throws IOException {
           HRegionInfo snapshotHri = snapshotRegions.get(region.getRegionInfo().getEncodedName());
@@ -822,9 +810,9 @@ public class RestoreSnapshotHelper {
 
     // we send createBackRefs=false so that restored hfiles do not create back reference links
     // in the base hbase root dir.
-    RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf, fs,
-      manifest, manifest.getTableDescriptor(), restoreDir, monitor, status, false);
-    RestoreMetaChanges metaChanges = helper.restoreHdfsRegions(); // TODO: parallelize.
+    RestoreSnapshotHelper helper = new RestoreSnapshotHelper(conf,
+      manifest, manifest.getTableDescriptor(), monitor, status, false);
+    RestoreMetaChanges metaChanges = helper.restoreStorageRegions(); // TODO: parallelize.
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Restored table dir:" + restoreDir);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index 3fa715a..572bc04 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
 import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.zookeeper.server.persistence.SnapShot;
 
 /**
  * Utility class to help read/write the Snapshot Manifest.
@@ -131,6 +133,12 @@ public final class SnapshotManifest {
     return manifest;
   }
 
+  public static SnapshotManifest open(final Configuration conf, final SnapshotDescription desc)
+      throws IOException {
+    Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(desc,
+        FSUtils.getRootDir(conf));
+    return open(conf, snapshotDir.getFileSystem(conf), snapshotDir, desc);
+  }
 
   /**
    * Add the table descriptor to the snapshot manifest
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index b670a26..9e6eeff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -34,10 +34,8 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -90,13 +88,12 @@ public abstract class ModifyRegionUtils {
    * NOTE: that you should add the regions to hbase:meta after this operation.
    *
    * @param conf {@link Configuration}
-   * @param rootDir Root directory for HBase instance
    * @param hTableDescriptor description of the table
    * @param newRegions {@link HRegionInfo} that describes the regions to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
-  public static List<HRegionInfo> createRegions(final Configuration conf, final Path rootDir,
+  public static List<HRegionInfo> createRegions(final Configuration conf,
     final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
     final RegionFillTask task) throws IOException {
     if (newRegions == null) return null;
@@ -104,7 +101,7 @@ public abstract class ModifyRegionUtils {
     ThreadPoolExecutor exec = getRegionOpenAndInitThreadPool(conf,
       "RegionOpenAndInitThread-" + hTableDescriptor.getTableName(), regionNumber);
     try {
-      return createRegions(exec, conf, rootDir, hTableDescriptor, newRegions, task);
+      return createRegions(exec, conf, hTableDescriptor, newRegions, task);
     } finally {
       exec.shutdownNow();
     }
@@ -116,16 +113,14 @@ public abstract class ModifyRegionUtils {
    *
    * @param exec Thread Pool Executor
    * @param conf {@link Configuration}
-   * @param rootDir Root directory for HBase instance
    * @param hTableDescriptor description of the table
    * @param newRegions {@link HRegionInfo} that describes the regions to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
   public static List<HRegionInfo> createRegions(final ThreadPoolExecutor exec,
-    final Configuration conf, final Path rootDir,
-    final HTableDescriptor hTableDescriptor, final HRegionInfo[] newRegions,
-    final RegionFillTask task) throws IOException {
+    final Configuration conf, final HTableDescriptor hTableDescriptor,
+    final HRegionInfo[] newRegions, final RegionFillTask task) throws IOException {
     if (newRegions == null) return null;
     int regionNumber = newRegions.length;
     CompletionService<HRegionInfo> completionService =
@@ -135,7 +130,7 @@ public abstract class ModifyRegionUtils {
       completionService.submit(new Callable<HRegionInfo>() {
         @Override
         public HRegionInfo call() throws IOException {
-          return createRegion(conf, rootDir, hTableDescriptor, newRegion, task);
+          return createRegion(conf, hTableDescriptor, newRegion, task);
         }
       });
     }
@@ -156,21 +151,22 @@ public abstract class ModifyRegionUtils {
   /**
    * Create new set of regions on the specified file-system.
    * @param conf {@link Configuration}
-   * @param rootDir Root directory for HBase instance
    * @param hTableDescriptor description of the table
    * @param newRegion {@link HRegionInfo} that describes the region to create
    * @param task {@link RegionFillTask} custom code to populate region after creation
    * @throws IOException
    */
-  public static HRegionInfo createRegion(final Configuration conf, final Path rootDir,
+  public static HRegionInfo createRegion(final Configuration conf,
     final HTableDescriptor hTableDescriptor, final HRegionInfo newRegion,
     final RegionFillTask task) throws IOException {
     // 1. Create HRegion
     // The WAL subsystem will use the default rootDir rather than the passed in rootDir
     // unless I pass along via the conf.
-    Configuration confForWAL = new Configuration(conf);
-    confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
-    HRegion region = HRegion.createHRegion(conf, rootDir, hTableDescriptor, newRegion);
+
+    // TODO this is not required when WAL will start using MasterStorage/ RegionStorage APIs
+    //Configuration confForWAL = new Configuration(conf);
+    //confForWAL.set(HConstants.HBASE_DIR, rootDir.toString());
+    HRegion region = HRegion.createHRegion(conf, hTableDescriptor, newRegion);
     try {
       // 2. Custom user code to interact with the created region
       if (task != null) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index a251c1c..4011af5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -154,7 +154,7 @@ public class TestRestoreSnapshotHelper {
     new FSTableDescriptors(conf).createTableDescriptor(htdClone);
 
     RestoreSnapshotHelper helper = getRestoreHelper(rootDir, snapshotDir, sd, htdClone);
-    helper.restoreHdfsRegions();
+    helper.restoreStorageRegions();
 
     LOG.debug("post-restore table=" + htdClone.getTableName() + " snapshot=" + snapshotDir);
     FSUtils.logFileSystemState(fs, rootDir, LOG);
@@ -169,8 +169,8 @@ public class TestRestoreSnapshotHelper {
     MonitoredTask status = Mockito.mock(MonitoredTask.class);
 
     SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, sd);
-    return new RestoreSnapshotHelper(conf, fs, manifest,
-      htdClone, rootDir, monitor, status);
+    return new RestoreSnapshotHelper(conf, manifest,
+      htdClone, monitor, status);
  }
 
   private Path getReferredToFile(final String referenceName) {
-- 
2.7.4 (Apple Git-66)