Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/TrashCleaner.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/TrashCleaner.java (revision 0) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/TrashCleaner.java (revision 0) @@ -0,0 +1,146 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.Chore; +import org.apache.hadoop.hbase.RemoteExceptionHandler; +import org.apache.hadoop.hbase.Stoppable; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.util.StringUtils; + +/** + * This Chore, every time it runs, will clear the dropped tables which are + * older than the threshold in the trash directory. + */ +@InterfaceAudience.Private +public class TrashCleaner extends Chore { + private static final Log LOG = LogFactory.getLog(TrashCleaner.class + .getName()); + public static final String TRASH_TABLE_NAME_SEPARATOR = ","; + private static final Pattern TRASH_TABLE_NAME_PATTERN = Pattern + .compile("[0-9]+" + TRASH_TABLE_NAME_SEPARATOR + ".+"); + + private final FileSystem fs; + // Directory where dropped tables' data is kept + private final Path trashDir; + private final long keepTime; + + public TrashCleaner(int period, Stoppable stopper, Configuration conf, + FileSystem fs, Path trashDir) { + super("TrashCleaner", period, stopper); + this.fs = fs; + this.trashDir = trashDir; + this.keepTime = conf.getLong("hbase.master.trash.keeptime", + 24 * 3600 * 1000L); + } + + @Override + protected void chore() { + try { + FileStatus[] files = this.fs.listStatus(this.trashDir); + if (files == null) + return; + for (FileStatus file : files) { + Path filePath = file.getPath(); + if (validateTrashTableName(filePath.getName())) { + // delete tables whose keep time has expired + long trashTime 
getTrashTimeFromName(filePath.getName()); + if (System.currentTimeMillis() - trashTime > keepTime) { + LOG.info("Deleting trash table " + filePath + + ", since we have kept it " + + StringUtils.formatTime(System.currentTimeMillis() - trashTime) + + ", keepTime = " + StringUtils.formatTime(this.keepTime)); + fs.delete(filePath, true); + } + } else { + LOG.warn("Found a wrong trash table path: " + filePath); + } + } + } catch (IOException e) { + e = RemoteExceptionHandler.checkIOException(e); + LOG.warn("Error while cleaning the trash tables", e); + } + } + + public List<String> getTableNamesInTrash() throws IOException { + List<String> tables = new ArrayList<String>(); + FileStatus[] files = this.fs.listStatus(this.trashDir); + for (FileStatus file : files) { + Path filePath = file.getPath(); + if (validateTrashTableName(filePath.getName())) { + tables.add(filePath.getName().split(TRASH_TABLE_NAME_SEPARATOR)[1]); + } + } + return tables; + } + + /** + * How long we have reserved the table in trash; returns -1 if it does not exist + * @param tableName + * @return reserved time + * @throws IOException + */ + public long getReservedTimeInTrash(final String tableName) + throws IOException { + FileStatus[] files = this.fs.listStatus(this.trashDir, new PathFilter() { + public boolean accept(Path path) { + return path.getName().endsWith(TRASH_TABLE_NAME_SEPARATOR + tableName); + } + }); + if (files == null || files.length == 0) { + return -1; + } + long now = System.currentTimeMillis(); + long reservedTime = now - getTrashTimeFromName(files[0].getPath().getName()); + return reservedTime; + } + + private long getTrashTimeFromName(String trashTableName) { + long trashTime = Long.parseLong(trashTableName + .split(TRASH_TABLE_NAME_SEPARATOR)[0]); + return trashTime; + } + + public static boolean validateTrashTableName(String trashTableName) { + Matcher m = TRASH_TABLE_NAME_PATTERN.matcher(trashTableName); + return m.matches(); + } + + public static String getTrashTableName(String originTableName) { + return System.currentTimeMillis() + 
TRASH_TABLE_NAME_SEPARATOR + + originTableName; + } + + public static String getTrashTableName(byte[] originTableName) { + return getTrashTableName(Bytes.toString(originTableName)); + } +} Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java =================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (revision 1352396) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java (working copy) @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.master.MasterCoprocessorHost; import org.apache.hadoop.hbase.master.MasterServices; +import org.apache.hadoop.hbase.master.TrashCleaner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; import org.apache.zookeeper.KeeperException; @@ -59,6 +60,9 @@ AssignmentManager am = this.masterServices.getAssignmentManager(); long waitTime = server.getConfiguration(). 
getLong("hbase.master.wait.on.region", 5 * 60 * 1000); + boolean trashEnabled = server.getConfiguration().getBoolean( + "hbase.master.trash.enable", true); + String trashTableName = TrashCleaner.getTrashTableName(tableName); for (HRegionInfo region : regions) { long done = System.currentTimeMillis() + waitTime; while (System.currentTimeMillis() < done) { @@ -76,9 +80,20 @@ " from META and FS"); // Remove region from META MetaEditor.deleteRegion(this.server.getCatalogTracker(), region); - // Delete region from FS - this.masterServices.getMasterFileSystem().deleteRegion(region); + if (trashEnabled) { + // Move region to trash from FS + this.masterServices.getMasterFileSystem().moveRegionToTrash(region, + trashTableName); + } else { + // Delete region from FS + this.masterServices.getMasterFileSystem().deleteRegion(region); + } } + if (trashEnabled) { + // Move table info files to trash from FS + this.masterServices.getMasterFileSystem().moveAllFilesToTrash(tableName, + trashTableName); + } // Delete table from FS this.masterServices.getMasterFileSystem().deleteTable(tableName); // Update table descriptor cache Index: hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java =================================================================== --- hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (revision 1352396) +++ hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java (working copy) @@ -229,6 +229,9 @@ /** Like the previous, but for old logs that are about to be deleted */ public static final String HREGION_OLDLOGDIR_NAME = ".oldlogs"; + /** Like the previous, but for trash tables that are about to be deleted */ + public static final String TRASH_DIR_NAME = ".trashtables"; + /** Used to construct the name of the compaction directory during compaction */ public static final String HREGION_COMPACTIONDIR_NAME = "compaction.dir"; Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
=================================================================== --- hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (revision 1352396) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (working copy) @@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.metrics.util.MBeanUtil; import org.apache.hadoop.net.DNS; +import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.Watcher; import org.apache.hadoop.hbase.ServerLoad; @@ -266,6 +267,7 @@ private CatalogJanitor catalogJanitorChore; private LogCleaner logCleaner; + private TrashCleaner trashCleaner; private MasterCoprocessorHost cpHost; private final ServerName serverName; @@ -966,6 +968,13 @@ getMasterFileSystem().getOldLogDir()); Threads.setDaemonThreadRunning(logCleaner.getThread(), n + ".oldLogCleaner"); + this.trashCleaner = new TrashCleaner(conf.getInt( + "hbase.master.trash.cleaner.interval", 3600 * 1000), this, conf, + getMasterFileSystem().getFileSystem(), getMasterFileSystem() + .getTrashDir()); + Threads.setDaemonThreadRunning(trashCleaner.getThread(), n + + ".trashCleaner"); + // Put up info server. 
int port = this.conf.getInt(HConstants.MASTER_INFO_PORT, 60010); if (port >= 0) { @@ -1001,6 +1010,7 @@ this.rpcServerOpen = false; // Clean up and close up shop if (this.logCleaner!= null) this.logCleaner.interrupt(); + if (this.trashCleaner != null) this.trashCleaner.interrupt(); if (this.infoServer != null) { LOG.info("Stopping infoServer"); try { @@ -1357,6 +1367,14 @@ cpHost.preCreateTable(hTableDescriptor, newRegions); } + + long reservedTimeInTrash = trashCleaner + .getReservedTimeInTrash(hTableDescriptor.getNameAsString()); + if (reservedTimeInTrash > 0) { + LOG.warn("Creating table " + hTableDescriptor.getNameAsString() + + " whose name is the same as one that was put into trash " + + StringUtils.formatTime(reservedTimeInTrash) + " ago"); + } + + this.executorService.submit(new CreateTableHandler(this, this.fileSystemManager, this.serverManager, hTableDescriptor, conf, newRegions, catalogTracker, assignmentManager)); Index: hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTrashCleaner.java =================================================================== --- hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTrashCleaner.java (revision 0) +++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTrashCleaner.java (revision 0) @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +@Category(MediumTests.class) +public class TestTrashCleaner { + final Log LOG = LogFactory.getLog(getClass()); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private HBaseAdmin admin; + + @BeforeClass + public static void setUpBeforeClass() throws Exception { + TEST_UTIL.getConfiguration().setBoolean("hbase.master.trash.enable", + true); + TEST_UTIL.getConfiguration().setInt("hbase.master.trash.cleaner.interval", + 2000); + TEST_UTIL.getConfiguration().setLong("hbase.master.trash.keeptime", + 15 * 1000); + TEST_UTIL.startMiniCluster(); + } + + @AfterClass + public static void tearDownAfterClass() throws Exception 
{ + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setUp() throws Exception { + this.admin = new HBaseAdmin(TEST_UTIL.getConfiguration()); + } + + @Test + public void testTrashCleaner() throws Exception { + final byte[] row = Bytes.toBytes("row"); + final byte[] qualifier = Bytes.toBytes("qualifier"); + final byte[] value = Bytes.toBytes("value"); + final byte[] table = Bytes.toBytes("testTrashCleaner"); + HTable ht = TEST_UTIL.createTable(table, HConstants.CATALOG_FAMILY); + + Put put = new Put(row); + put.add(HConstants.CATALOG_FAMILY, qualifier, value); + ht.put(put); + Get get = new Get(row); + get.addColumn(HConstants.CATALOG_FAMILY, qualifier); + ht.get(get); + + this.admin.disableTable(table); + this.admin.deleteTable(table); + + MasterFileSystem mfs = TEST_UTIL.getMiniHBaseCluster().getMaster() + .getMasterFileSystem(); + + assertTrue(mfs.getFileSystem().exists(mfs.getTrashDir())); + + // one trash table + FileStatus[] files = mfs.getFileSystem().listStatus(mfs.getTrashDir()); + assertEquals(1, files.length); + + // one region in trash table + Path trashTablePath = files[0].getPath(); + FileStatus[] fileStatuses = mfs.getFileSystem().listStatus(trashTablePath, + new PathFilter() { + public boolean accept(Path path) { + if (path.getName().startsWith(".")) { + return false; + } + return true; + } + }); + assertEquals(1, fileStatuses.length); + + int waitTime = 0; + while (true) { + if (mfs.getFileSystem().exists(trashTablePath)) { + Thread.sleep(10); + waitTime += 10; + if (waitTime > 30 * 1000) { + fail("Wait time =" + waitTime + ", but trash table " + + Bytes.toString(table) + " hasn't been cleaned"); + } + } else { + break; + } + } + } + + @org.junit.Rule + public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu = new org.apache.hadoop.hbase.ResourceCheckerJUnitRule(); +} Index: hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java =================================================================== --- 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (revision 1352396) +++ hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (working copy) @@ -23,7 +23,6 @@ import java.util.ArrayList; import java.util.List; import java.util.Set; -import java.util.UUID; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -53,6 +52,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.Pair; /** * This class abstracts a bunch of operations the HMaster needs to interact with @@ -76,6 +76,8 @@ private volatile boolean fsOk = true; // The Path to the old logs dir private final Path oldLogDir; + // The Path to keep trash tables dir + private final Path trashDir; // root hbase directory on the FS private final Path rootdir; // create the split log lock @@ -113,20 +115,24 @@ } // setup the filesystem variable // set up the archived logs path - this.oldLogDir = createInitialFileSystemLayout(); + // set up the trashed tables path + Pair paths = createInitialFileSystemLayout(); + this.oldLogDir = paths.getFirst(); + this.trashDir = paths.getSecond(); } /** * Create initial layout in filesystem. *
    * <ol>
    * <li>Check if the root region exists and is readable, if not create it.
-   * Create hbase.version and the -ROOT- directory if not one.
-   * </li>
+   * Create hbase.version and the -ROOT- directory if not one.</li>
    * <li>Create a log archive directory for RS to put archived logs</li>
+   * <li>Create a table trash directory for MASTER to put dropped tables</li>
    * </ol>
* Idempotent. + * @return A pair of oldLogDir path and trashTableDir path */ - private Path createInitialFileSystemLayout() throws IOException { + private Pair createInitialFileSystemLayout() throws IOException { // check if the root directory exists checkRootDir(this.rootdir, conf, this.fs); @@ -137,7 +143,13 @@ this.fs.mkdirs(oldLogDir); } - return oldLogDir; + Path trashDir = new Path(this.rootdir, HConstants.TRASH_DIR_NAME); + // Make sure MASTER can move dropped tables to trash tables dir + if (!this.fs.exists(trashDir)) { + this.fs.mkdirs(trashDir); + } + + return new Pair(oldLogDir, trashDir); } public FileSystem getFileSystem() { @@ -153,6 +165,14 @@ } /** + * Get the directory where dropped tables go + * @return the dir + */ + public Path getTrashDir() { + return this.trashDir; + } + + /** * Checks to see if the file system is still accessible. * If not, sets closed * @return false if file system is not available @@ -443,6 +463,49 @@ fs.delete(HRegion.getRegionDir(rootdir, region), true); } + /** + * Move dropped table's region to trash table directory. + * @param region + * @param trashTableName + * @throws IOException + */ + public void moveRegionToTrash(HRegionInfo region, String trashTableName) + throws IOException { + Path trashTablePath = new Path(trashDir, trashTableName); + if (!fs.exists(trashTablePath)) { + fs.mkdirs(trashTablePath); + } + Path trashRegionPath = new Path(trashTablePath, region.getEncodedName()); + if (!fs.rename(HRegion.getRegionDir(rootdir, region), trashRegionPath)) { + String msg = "Failed moving region " + region.getRegionNameAsString() + + " to trash "; + LOG.warn(msg); + throw new IOException(msg); + } + } + + /** + * Move all dropped table's files from table directory to trash table + * directory, e.g. 
table info files + * @param tableName + * @param trashTableName + * @throws IOException + */ + public void moveAllFilesToTrash(byte[] tableName, String trashTableName) + throws IOException { + Path tableDir = HTableDescriptor.getTableDir(rootdir, tableName); + Path trashTablePath = new Path(trashDir, trashTableName); + FileStatus[] files = fs.listStatus(tableDir); + for (FileStatus file : files) { + if (!fs.rename(file.getPath(), new Path(trashTablePath, file.getPath() + .getName()))) { + String msg = "Failed moving " + file.getPath() + " to trash"; + LOG.warn(msg); + throw new IOException(msg); + } + } + } + public void deleteTable(byte[] tableName) throws IOException { fs.delete(new Path(rootdir, Bytes.toString(tableName)), true); }