Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java	(revision 1349672)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java	(working copy)
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.TrashTableCleaner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.zookeeper.KeeperException;
@@ -59,6 +60,9 @@
     AssignmentManager am = this.masterServices.getAssignmentManager();
     long waitTime = server.getConfiguration().
       getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
+    boolean trashEnabled = server.getConfiguration().getBoolean(
+        "hbase.master.trashtable.enable", true);
+    String trashTableName = TrashTableCleaner.getTrashTableName(tableName);
     for (HRegionInfo region : regions) {
       long done = System.currentTimeMillis() + waitTime;
       while (System.currentTimeMillis() < done) {
@@ -76,9 +80,20 @@
         " from META and FS");
       // Remove region from META
       MetaEditor.deleteRegion(this.server.getCatalogTracker(), region);
-      // Delete region from FS
-      this.masterServices.getMasterFileSystem().deleteRegion(region);
+      if (trashEnabled) {
+        // Move region to trash from FS
+        this.masterServices.getMasterFileSystem().moveRegionToTrash(region,
+            trashTableName);
+      } else {
+        // Delete region from FS
+        this.masterServices.getMasterFileSystem().deleteRegion(region);
+      }
     }
+    if (trashEnabled) {
+      // Move table info files to trash from FS
+      this.masterServices.getMasterFileSystem().moveAllFilesToTrash(tableName,
+          trashTableName);
+    }
     // Delete table from FS
     this.masterServices.getMasterFileSystem().deleteTable(tableName);
     // Update table descriptor cache
Index: hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
===================================================================
--- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java	(revision 1349672)
+++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java	(working copy)
@@ -228,6 +228,9 @@
   /** Like the previous, but for old logs that are about to be deleted */
   public static final String HREGION_OLDLOGDIR_NAME = ".oldlogs";
 
+  /** Directory, under the HBase root dir, holding dropped tables until the keep time expires */
+  public static final String HTABLE_TRASHTABLEDIR_NAME = ".trashtables";
+
   /** Used to construct the name of the compaction directory during compaction */
   public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir";
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java	(revision 1349672)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java	(working copy)
@@ -266,6 +266,7 @@
   private CatalogJanitor catalogJanitorChore;
   private LogCleaner logCleaner;
+  private TrashTableCleaner trashTableCleaner;
 
   private MasterCoprocessorHost cpHost;
   private final ServerName serverName;
@@ -954,6 +955,14 @@
        getMasterFileSystem().getOldLogDir());
    Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner");
 
+   // Periodically purge tables that have sat in the trash dir past the keep time
+   this.trashTableCleaner = new TrashTableCleaner(conf.getInt(
+       "hbase.master.trashtable.interval", 3600 * 1000), this, conf,
+       getMasterFileSystem().getFileSystem(), getMasterFileSystem()
+           .getTrashTableDir());
+   Threads.setDaemonThreadRunning(trashTableCleaner.getThread(), n
+       + ".trashTableCleaner");
+
    // Put up info server.
    int port = this.conf.getInt(HConstants.MASTER_INFO_PORT, 60010);
    if (port >= 0) {
@@ -989,6 +998,7 @@
     this.rpcServerOpen = false;
     // Clean up and close up shop
     if (this.logCleaner != null) this.logCleaner.interrupt();
+    if (this.trashTableCleaner != null) this.trashTableCleaner.interrupt();
     if (this.infoServer != null) {
       LOG.info("Stopping infoServer");
       try {
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/TrashTableCleaner.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/TrashTableCleaner.java	(revision 0)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/TrashTableCleaner.java	(revision 0)
@@ -0,0 +1,109 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Chore;
+import org.apache.hadoop.hbase.RemoteExceptionHandler;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * This Chore, every time it runs, deletes the trash-table directories that
+ * have sat in the trash tables directory longer than the configured keep time.
+ */
+public class TrashTableCleaner extends Chore {
+  private static final Log LOG = LogFactory.getLog(TrashTableCleaner.class);
+  public static final String TRASH_TABLE_NAME_SEPARATOR = ",";
+  private static final Pattern TRASH_TABLE_NAME_PATTERN = Pattern
+      .compile("[0-9]+" + TRASH_TABLE_NAME_SEPARATOR + ".+");
+
+  private final FileSystem fs;
+  // Directory that dropped tables' data is kept in
+  private final Path trashTableDir;
+  // How long (ms) a trash table is retained before it is deleted for good
+  private final long keepTime;
+
+  /**
+   * @param period how often (ms) this chore wakes up
+   * @param stopper stops the chore (typically the master)
+   * @param conf read for hbase.master.trashtable.keeptime
+   * @param fs filesystem holding the trash tables directory
+   * @param trashTableDir directory dropped tables were moved into
+   */
+  public TrashTableCleaner(int period, Stoppable stopper, Configuration conf,
+      FileSystem fs, Path trashTableDir) {
+    super("TrashTableCleaner", period, stopper);
+    this.fs = fs;
+    this.trashTableDir = trashTableDir;
+    this.keepTime = conf.getLong("hbase.master.trashtable.keeptime",
+        24 * 3600 * 1000L);
+  }
+
+  @Override
+  protected void chore() {
+    try {
+      FileStatus[] files = this.fs.listStatus(this.trashTableDir);
+      if (files == null) {
+        return;
+      }
+      for (FileStatus file : files) {
+        Path filePath = file.getPath();
+        if (validateTrashTableName(filePath.getName())) {
+          // Only delete once the table has been in the trash past keepTime
+          long trashTime = getTrashTimeFromName(filePath.getName());
+          if (System.currentTimeMillis() - trashTime > keepTime) {
+            LOG.info("Deleting trash table " + filePath + ", since we have kept it "
+                + StringUtils.formatTime(System.currentTimeMillis() - trashTime)
+                + ", keepTime = " + StringUtils.formatTime(this.keepTime));
+            fs.delete(filePath, true);
+          }
+        } else {
+          LOG.warn("Found a wrong trash table path: " + filePath);
+        }
+      }
+    } catch (IOException e) {
+      e = RemoteExceptionHandler.checkIOException(e);
+      LOG.warn("Error while cleaning the trash tables", e);
+    }
+  }
+
+  // Extracts the leading epoch-millis timestamp from "<millis>,<tableName>"
+  private long getTrashTimeFromName(String trashTableName) {
+    return Long.parseLong(trashTableName
+        .split(TRASH_TABLE_NAME_SEPARATOR)[0]);
+  }
+
+  /** @return true if the name matches "<millis>,<originalTableName>" */
+  public static boolean validateTrashTableName(String trashTableName) {
+    Matcher m = TRASH_TABLE_NAME_PATTERN.matcher(trashTableName);
+    return m.matches();
+  }
+
+  /** @return trash dir name for the dropped table: current millis + "," + name */
+  public static String getTrashTableName(String originTableName) {
+    return System.currentTimeMillis() + TRASH_TABLE_NAME_SEPARATOR
+        + originTableName;
+  }
+
+  public static String getTrashTableName(byte[] originTableName) {
+    return getTrashTableName(Bytes.toString(originTableName));
+  }
+}
Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTrashTableCleaner.java
===================================================================
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTrashTableCleaner.java	(revision 0)
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTrashTableCleaner.java	(revision 0)
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+// Starts a 3-node mini cluster, so this cannot be a SmallTests category test
+@Category(MediumTests.class)
+public class TestTrashTableCleaner {
+  final Log LOG = LogFactory.getLog(getClass());
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private HBaseAdmin admin;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setBoolean("hbase.master.trashtable.enable",
+        true);
+    TEST_UTIL.getConfiguration().setInt("hbase.master.trashtable.interval",
+        2000);
+    TEST_UTIL.getConfiguration().setLong("hbase.master.trashtable.keeptime",
+        10 * 1000);
+    TEST_UTIL.startMiniCluster(3);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    this.admin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+  }
+
+  @Test(timeout = 50000)
+  public void testTrashTableCleaner() throws Exception {
+    final byte[] row = Bytes.toBytes("row");
+    final byte[] qualifier = Bytes.toBytes("qualifier");
+    final byte[] value = Bytes.toBytes("value");
+    final byte[] table = Bytes.toBytes("testTrashTableCleaner");
+    HTable ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY);
+
+    Put put = new Put(row);
+    put.add(HConstants.CATALOG_FAMILY, qualifier, value);
+    ht.put(put);
+    Get get = new Get(row);
+    get.addColumn(HConstants.CATALOG_FAMILY, qualifier);
+    ht.get(get);
+
+    this.admin.disableTable(table);
+    this.admin.deleteTable(table);
+
+    MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster()
+        .getMasterFileSystem();
+
+    assertTrue(mfs.getFileSystem().exists(mfs.getTrashTableDir()));
+
+    // one trash table
+    FileStatus[] files = mfs.getFileSystem().listStatus(mfs.getTrashTableDir());
+    assertEquals(1, files.length);
+
+    // one region in trash table
+    Path trashTablePath = files[0].getPath();
+    FileStatus[] fileStatuses = mfs.getFileSystem().listStatus(trashTablePath,
+        new PathFilter() {
+          public boolean accept(Path path) {
+            if (path.getName().startsWith(".")) {
+              return false;
+            }
+            return true;
+          }
+        });
+    assertEquals(1, fileStatuses.length);
+
+    int waitTime = 0;
+    while (true) {
+      if (mfs.getFileSystem().exists(trashTablePath)) {
+        Thread.sleep(100);
+        waitTime += 100;
+        if (waitTime > 30 * 1000) {
+          fail("Wait time =" + waitTime + ", but trash table "
+              + Bytes.toString(table) + " hasn't been cleaned");
+        }
+      } else {
+        break;
+      }
+    }
+  }
+
+  @org.junit.Rule
+  public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();
+}
Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
===================================================================
--- hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java	(revision 1349672)
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java	(working copy)
@@ -23,7 +23,6 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -53,6 +52,7 @@
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * This class abstracts a bunch of operations the HMaster needs to interact with
@@ -76,6 +76,8 @@
   private volatile boolean fsOk = true;
   // The Path to the old logs dir
   private final Path oldLogDir;
+  // The Path to the trash tables dir
+  private final Path trashTableDir;
   // root hbase directory on the FS
   private final Path rootdir;
   // create the split log lock
@@ -113,20 +115,24 @@
     }
     // setup the filesystem variable
     // set up the archived logs path
-    this.oldLogDir = createInitialFileSystemLayout();
+    // set up the trashed tables path
+    Pair<Path, Path> paths = createInitialFileSystemLayout();
+    this.oldLogDir = paths.getFirst();
+    this.trashTableDir = paths.getSecond();
   }
 
   /**
    * Create initial layout in filesystem.
    * <ol>
    * <li>Check if the root region exists and is readable, if not create it.
-   * Create hbase.version and the -ROOT- directory if not one.
-   * </li>
+   * Create hbase.version and the -ROOT- directory if not one.</li>
    * <li>Create a log archive directory for RS to put archived logs</li>
+   * <li>Create a table trash directory for MASTER to put dropped tables</li>
    * </ol>
    * Idempotent.
+   * @return A pair of the oldLogDir path and the trashTableDir path
    */
-  private Path createInitialFileSystemLayout() throws IOException {
+  private Pair<Path, Path> createInitialFileSystemLayout() throws IOException {
     // check if the root directory exists
     checkRootDir(this.rootdir, conf, this.fs);
 
@@ -137,7 +143,13 @@
       this.fs.mkdirs(oldLogDir);
     }
 
-    return oldLogDir;
+    Path trashTableDir = new Path(this.rootdir, HConstants.HTABLE_TRASHTABLEDIR_NAME);
+    // Make sure MASTER can move dropped tables to the trash tables dir
+    if (!this.fs.exists(trashTableDir)) {
+      this.fs.mkdirs(trashTableDir);
+    }
+
+    return new Pair<Path, Path>(oldLogDir, trashTableDir);
   }
 
   public FileSystem getFileSystem() {
@@ -153,6 +165,14 @@
   }
 
   /**
+   * Get the directory where dropped tables go
+   * @return the dir
+   */
+  public Path getTrashTableDir() {
+    return this.trashTableDir;
+  }
+
+  /**
    * Checks to see if the file system is still accessible.
    * If not, sets closed
    * @return false if file system is not available
@@ -443,6 +463,56 @@
     fs.delete(HRegion.getRegionDir(rootdir, region), true);
   }
 
+  /**
+   * Move a dropped table's region directory into the trash table directory.
+   * @param region region of the table being dropped
+   * @param trashTableName name of the target trash directory
+   *     ("&lt;millis&gt;,&lt;tableName&gt;")
+   * @throws IOException if the rename fails
+   */
+  public void moveRegionToTrash(HRegionInfo region, String trashTableName)
+      throws IOException {
+    Path trashTablePath = new Path(trashTableDir, trashTableName);
+    if (!fs.exists(trashTablePath)) {
+      fs.mkdirs(trashTablePath);
+    }
+    Path trashRegionPath = new Path(trashTablePath, region.getEncodedName());
+    if (!fs.rename(HRegion.getRegionDir(rootdir, region), trashRegionPath)) {
+      String msg = "Failed moving region " + region.getRegionNameAsString()
+          + " to trash ";
+      LOG.warn(msg);
+      throw new IOException(msg);
+    }
+  }
+
+  /**
+   * Move all of a dropped table's remaining files (e.g. table info files)
+   * from the table directory into the trash table directory.
+   * @param tableName table being dropped
+   * @param trashTableName name of the target trash directory
+   * @throws IOException if any file cannot be moved; the caller deletes the
+   *     table directory afterwards, so a silently skipped file would be lost
+   */
+  public void moveAllFilesToTrash(byte[] tableName, String trashTableName)
+      throws IOException {
+    Path tableDir = HTableDescriptor.getTableDir(rootdir, tableName);
+    Path trashTablePath = new Path(trashTableDir, trashTableName);
+    FileStatus[] files = fs.listStatus(tableDir);
+    if (files == null) {
+      // Table dir already gone; nothing left to move
+      return;
+    }
+    for (FileStatus file : files) {
+      if (!fs.rename(file.getPath(), new Path(trashTablePath, file.getPath()
+          .getName()))) {
+        String msg = "Failed moving " + file.getPath() + " to trash";
+        LOG.warn(msg);
+        throw new IOException(msg);
+      }
+    }
+  }
+
   public void deleteTable(byte[] tableName) throws IOException {
     fs.delete(new Path(rootdir, Bytes.toString(tableName)), true);
   }