Index: hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java (revision 1442469)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHFileArchiveUtil.java (working copy)
@@ -50,9 +50,10 @@

@Test
public void testRegionArchiveDir() {
+ Configuration conf = null;
Path tableDir = new Path("table");
Path regionDir = new Path("region");
- assertNotNull(HFileArchiveUtil.getRegionArchiveDir(null, tableDir, regionDir));
+ assertNotNull(HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir));
}

@Test
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java (revision 1442469)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java (working copy)
@@ -110,7 +110,7 @@
// make sure the regiondir lives under the tabledir
Preconditions.checkArgument(regionDir.toString().startsWith(tableDir.toString()));
- Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(conf, tableDir, regionDir);
+ Path regionArchiveDir = HFileArchiveUtil.getRegionArchiveDir(rootdir, tableDir, regionDir);

LOG.debug("Have an archive directory, preparing to move files");
FileStatusConverter getAsFile = new FileStatusConverter(fs);
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (revision 1442469)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/util/HFileArchiveUtil.java (working copy)
@@ -99,6 +99,24 @@
}

/**
+ * Get the archive directory for a given region under the specified table
+ * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+ * the archive path)
+ * @param tabledir the original table directory. Cannot be null.
+ * @param regiondir the path to the region directory. Cannot be null.
+ * @return {@link Path} to the directory to archive the given region, or null if it
+ * should not be archived
+ */
+ public static Path getRegionArchiveDir(Path rootdir, Path tabledir, Path regiondir) {
+ // get the archive directory for a table
+ Path archiveDir = getTableArchivePath(rootdir, tabledir.getName());
+
+ // then add on the region path under the archive
+ String encodedRegionName = regiondir.getName();
+ return HRegion.getRegionDir(archiveDir, encodedRegionName);
+ }
+
+ /**
* Get the path to the table archive directory based on the configured archive directory.
* <p>
* Get the path to the table's archive directory.
@@ -109,12 +127,27 @@
*/
public static Path getTableArchivePath(Path tabledir) {
Path root = tabledir.getParent();
- return new Path(new Path(root,HConstants.HFILE_ARCHIVE_DIRECTORY), tabledir.getName());
+ return getTableArchivePath(root, tabledir.getName());
}

/**
* Get the path to the table archive directory based on the configured archive directory.
* <p>
+ * Get the path to the table's archive directory.
+ * <p>
+ * Generally of the form: /hbase/.archive/[tablename]
+ * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+ * the archive path)
+ * @param tableName Name of the table to be archived. Cannot be null.
+ * @return {@link Path} to the archive directory for the table
+ */
+ public static Path getTableArchivePath(final Path rootdir, final String tableName) {
+ return new Path(getArchivePath(rootdir), tableName);
+ }
+
+ /**
+ * Get the path to the table archive directory based on the configured archive directory.
+ * <p>
* Assumed that the table should already be archived.
* @param conf {@link Configuration} to read the archive directory property. Can be null
* @param tableName Name of the table to be archived. Cannot be null.
@@ -133,6 +166,16 @@
* @throws IOException if an unexpected error occurs
*/
public static Path getArchivePath(Configuration conf) throws IOException {
- return new Path(FSUtils.getRootDir(conf), HConstants.HFILE_ARCHIVE_DIRECTORY);
+ return getArchivePath(FSUtils.getRootDir(conf));
}
+
+ /**
+ * Get the full path to the archive directory on the configured {@link FileSystem}
+ * @param rootdir {@link Path} to the root directory where hbase files are stored (for building
+ * the archive path)
+ * @return the full {@link Path} to the archive directory, as defined by the configuration
+ */
+ private static Path getArchivePath(final Path rootdir) {
+ return new Path(rootdir, HConstants.HFILE_ARCHIVE_DIRECTORY);
+ }
}
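
The helpers above derive every archive location from the root directory alone, so callers like HFileArchiver no longer need a Configuration just to build a path. A minimal sketch of the resulting layout, using Hadoop's Path directly (the root URI, table name, and encoded region name are invented for illustration):

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      public static void main(String[] args) {
        Path rootdir = new Path("hdfs://nn/hbase");     // hypothetical root directory
        String tableName = "mytable";                   // hypothetical table
        String encodedRegion = "70236052";              // hypothetical encoded region name

        // same chain as getArchivePath -> getTableArchivePath -> getRegionArchiveDir
        Path archive = new Path(rootdir, ".archive");   // HConstants.HFILE_ARCHIVE_DIRECTORY
        Path tableArchive = new Path(archive, tableName);
        Path regionArchive = new Path(tableArchive, encodedRegion);
        System.out.println(regionArchive);              // hdfs://nn/hbase/.archive/mytable/70236052
      }
    }
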
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (revision 1442469)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (working copy)
@@ -79,6 +79,8 @@
private final Path oldLogDir;
// root hbase directory on the FS
private final Path rootdir;
+ // hbase temp directory used for table construction and deletion
+ private final Path tempdir;
// create the split log lock
final Lock splitLogLock = new ReentrantLock();
final boolean distributedLogSplitting;
@@ -109,6 +111,7 @@
// default localfs. Presumption is that rootdir is fully-qualified before
// we get to here with appropriate fs scheme.
this.rootdir = FSUtils.getRootDir(conf);
+ this.tempdir = new Path(this.rootdir, HConstants.HBASE_TEMP_DIRECTORY);
// Cover both bases, the old way of setting default fs and the new.
// We're supposed to run on 0.20 and 0.21 anyways.
this.fs = this.rootdir.getFileSystem(conf);
@@ -146,6 +149,9 @@
// check if the root directory exists
checkRootDir(this.rootdir, conf, this.fs);
+ // check if temp directory exists and clean it
+ checkTempDir(this.tempdir, conf, this.fs);
+
Path oldLogDir = new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME);
// Make sure the region servers can archive their old logs
@@ -194,6 +200,13 @@
}
/**
+ * @return HBase temp dir.
+ */
+ public Path getTempDir() {
+ return this.tempdir;
+ }
+
+ /**
* @return The unique identifier generated for this cluster
*/
public ClusterId getClusterId() {
@@ -439,6 +452,32 @@
return rd;
}
+ /**
+ * Make sure the hbase temp directory exists and is empty.
+ * NOTE that this method is only executed once just after the master becomes the active one.
+ */
+ private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs)
+ throws IOException {
+ // If the temp directory exists, clear the content (left over, from the previous run)
+ if (fs.exists(tmpdir)) {
+ // Archive any tables in temp, possibly left over from a failed deletion;
+ // if not, the cleaner will take care of them.
+ for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) {
+ for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) {
+ HFileArchiver.archiveRegion(c, fs, this.rootdir, tabledir, regiondir);
+ }
+ }
+ if (!fs.delete(tmpdir, true)) {
+ throw new IOException("Unable to clean the temp directory: " + tmpdir);
+ }
+ }
+
+ // Create the temp directory
+ if (!fs.mkdirs(tmpdir)) {
+ throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
+ }
+ }
+
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
LOG.info("BOOTSTRAP: creating ROOT and first META regions");
@@ -503,6 +542,37 @@
fs.delete(new Path(rootdir, Bytes.toString(tableName)), true);
}
+ /**
+ * Move the specified file/directory to the hbase temp directory.
+ * @param path The path of the file/directory to move
+ * @return The temp location of the file/directory moved
+ * @throws IOException in case of file-system failure
+ */
+ public Path moveToTemp(final Path path) throws IOException {
+ Path tempPath = new Path(this.tempdir, path.getName());
+
+ // Ensure temp exists
+ if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
+ throw new IOException("HBase temp directory '" + tempdir + "' creation failure.");
+ }
+
+ if (!fs.rename(path, tempPath)) {
+ throw new IOException("Unable to move '" + path + "' to temp '" + tempPath + "'");
+ }
+
+ return tempPath;
+ }
+
+ /**
+ * Move the specified table to the hbase temp directory
+ * @param tableName Table name to move
+ * @return The temp location of the table moved
+ * @throws IOException in case of file-system failure
+ */
+ public Path moveTableToTemp(byte[] tableName) throws IOException {
+ return moveToTemp(HTableDescriptor.getTableDir(this.rootdir, tableName));
+ }
+
public void updateRegionInfo(HRegionInfo region) {
// TODO implement this. i think this is currently broken in trunk i don't
// see this getting updated.
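
The two additions above work as a pair: moveToTemp hides a table from clients with a single rename (atomic on HDFS), and checkTempDir finishes the job on the next master activation by archiving whatever was left under .tmp before wiping and recreating it. A standalone sketch of the rename-first idiom, run against the local FileSystem purely for illustration (all paths are invented):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class MoveToTempSketch {
      // same shape as MasterFileSystem.moveToTemp: ensure temp exists, then rename
      static Path moveToTemp(FileSystem fs, Path tempdir, Path path) throws IOException {
        Path tempPath = new Path(tempdir, path.getName());
        if (!fs.exists(tempdir) && !fs.mkdirs(tempdir)) {
          throw new IOException("Unable to create temp directory " + tempdir);
        }
        if (!fs.rename(path, tempPath)) {  // one metadata operation, atomic on HDFS
          throw new IOException("Unable to move '" + path + "' to '" + tempPath + "'");
        }
        return tempPath;
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path root = new Path("/tmp/hbase-sketch");       // hypothetical root directory
        fs.mkdirs(new Path(root, "mytable"));            // stands in for a real table dir
        Path moved = moveToTemp(fs, new Path(root, ".tmp"), new Path(root, "mytable"));
        System.out.println("table hidden under " + moved);
      }
    }
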
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (revision 1442469)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (working copy)
@@ -24,12 +24,16 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.backup.HFileArchiver;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
@@ -55,6 +59,8 @@
if (cpHost != null) {
cpHost.preDeleteTableHandler(this.tableName);
}
+
+ // 1. Wait for regions in transition
AssignmentManager am = this.masterServices.getAssignmentManager();
long waitTime = server.getConfiguration().
getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
@@ -71,21 +77,32 @@
waitTime + "ms) for region to leave region " +
region.getRegionNameAsString() + " in transitions");
}
- LOG.debug("Deleting region " + region.getRegionNameAsString() +
- " from META and FS");
- // Remove region from META
- MetaEditor.deleteRegion(this.server.getCatalogTracker(), region);
- // Delete region from FS
- this.masterServices.getMasterFileSystem().deleteRegion(region);
}
- // Delete table from FS
- this.masterServices.getMasterFileSystem().deleteTable(tableName);
- // Update table descriptor cache
+
+ // 2. Remove regions from META
+ LOG.debug("Deleting regions from META");
+ MetaEditor.deleteRegions(this.server.getCatalogTracker(), regions);
+
+ // 3. Move the table into /hbase/.tmp
+ MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
+ Path tempTableDir = mfs.moveTableToTemp(tableName);
+
+ // 4. Update table descriptor cache
this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));
- // If entry for this table in zk, and up in AssignmentManager, remove it.
+ // 5. If entry for this table in zk, and up in AssignmentManager, remove it.
am.getZKTable().setDeletedTable(Bytes.toString(tableName));
+ // 6. Delete regions from FS (temp directory)
+ FileSystem fs = mfs.getFileSystem();
+ for (HRegionInfo hri: regions) {
+ LOG.debug("Deleting region " + hri.getRegionNameAsString() + " from FS");
+ HFileArchiver.archiveRegion(masterServices.getConfiguration(), fs, mfs.getRootDir(),
+ tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
+ }
+ // 7. Delete table from FS (temp directory)
+ fs.delete(tempTableDir, true);
+
if (cpHost != null) {
cpHost.postDeleteTableHandler(this.tableName);
}
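
With this ordering a crash at any point leaves either a fully visible table or one already hidden under .tmp, which checkTempDir will finish archiving on the next master activation; HFiles are always archived before the temp copy is deleted. A self-contained sketch of steps 3, 6 and 7 (the MasterFs interface and the list of encoded region names are invented stand-ins for MasterFileSystem and List<HRegionInfo>):

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.backup.HFileArchiver;

    public class DeleteFlowSketch {
      // stand-in for the relevant slice of MasterFileSystem (illustrative only)
      interface MasterFs {
        Path moveTableToTemp(byte[] tableName) throws IOException;
        Path getRootDir();
        FileSystem getFileSystem();
      }

      static void deleteTableFiles(Configuration conf, MasterFs mfs, byte[] tableName,
          List<String> encodedRegionNames) throws IOException {
        Path tempTableDir = mfs.moveTableToTemp(tableName);  // 3. atomic hide from clients
        FileSystem fs = mfs.getFileSystem();
        for (String region : encodedRegionNames) {
          // 6. archive HFiles so they survive the delete; the cleaner reclaims them later
          HFileArchiver.archiveRegion(conf, fs, mfs.getRootDir(),
              tempTableDir, new Path(tempTableDir, region));
        }
        if (!fs.delete(tempTableDir, true)) {                // 7. drop what remains in temp
          throw new IOException("Unable to delete " + tempTableDir);
        }
      }
    }
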
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java (revision 1442469)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java (working copy)
@@ -21,7 +21,6 @@
import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
@@ -36,6 +35,8 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
@@ -88,7 +89,9 @@
}
} catch (InterruptedException e) {
LOG.warn("Interrupted waiting for meta availability", e);
- throw new IOException(e);
+ InterruptedIOException ie = new InterruptedIOException(e.getMessage());
+ ie.initCause(e);
+ throw ie;
}
String tableName = this.hTableDescriptor.getNameAsString();
@@ -143,18 +146,77 @@
}
}
- private void handleCreateTable(String tableName) throws IOException,
- KeeperException {
+ /**
+ * Responsible for table creation (on-disk and META) and assignment.
+ * - Create the table directory and descriptor (temp folder)
+ * - Create the on-disk regions (temp folder)
+ * [If something fails here: we just have some trash in temp]
+ * - Move the table from temp to the root directory
+ * [If something fails here: the table is in place but the required rows are
+ * not yet present in META (hbck needed)]
+ * - Add regions to META
+ * [If something fails here: we don't have regions assigned: table disabled]
+ * - Assign regions to Region Servers
+ * [If something fails here: we still have the table in disabled state]
+ * - Update ZooKeeper with the enabled state
+ */
+ private void handleCreateTable(String tableName) throws IOException, KeeperException {
+ Path tempdir = fileSystemManager.getTempDir();
+ FileSystem fs = fileSystemManager.getFileSystem();
+
+ // 1. Create Table Descriptor
+ FSTableDescriptors.createTableDescriptor(fs, tempdir, this.hTableDescriptor);
+ Path tempTableDir = new Path(tempdir, tableName);
+ Path tableDir = new Path(fileSystemManager.getRootDir(), tableName);
+
+ // 2. Create Regions
+ List
+ * deletes from the .META. table.
+ * @param ct CatalogTracker on whose back we will ride the edit.
+ * @param deletes Deletes to add to .META.
+ * @throws IOException
+ */
+ static void deleteFromMetaTable(final CatalogTracker ct, final List
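
The deleteRegions call used in step 2 of the delete handler funnels into deleteFromMetaTable, which batches the per-region Delete operations into a single call against .META. rather than one round trip per region. A minimal sketch of such a batched delete using the plain client API of this era, bypassing the CatalogTracker plumbing (the meta row key shown is invented):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaBatchDeleteSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        HTable meta = new HTable(conf, ".META.");
        try {
          List<Delete> deletes = new ArrayList<Delete>();
          // row keys are normally derived from each HRegionInfo; this one is made up
          deletes.add(new Delete(Bytes.toBytes("mytable,,1359992821000.70236052.")));
          meta.delete(deletes);  // one batched call covering every region of the table
        } finally {
          meta.close();
        }
      }
    }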