diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6b628e8..99c08ea 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2473,7 +2473,7 @@ public class HRegion implements HeapSize { // , Writable{
     // files/batch, far more than the number of store files under a single column family.
     for (Store store : stores.values()) {
       // 2.1. build the snapshot reference directory for the store
-      Path dstStoreDir = snapshotRegionFs.getStoreDir(store.getFamily().getNameAsString());
+      Path dstStoreDir = snapshotRegionFs.createStoreDir(store.getFamily().getNameAsString());
       List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
       if (LOG.isDebugEnabled()) {
         LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java
index 60d48d9..8298bc1 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/ReferenceRegionHFilesTask.java
@@ -96,16 +96,16 @@ public class ReferenceRegionHFilesTask extends SnapshotTask {
       // get all the hfiles in the family
       FileStatus[] hfiles = FSUtils.listStatus(fs, familyDir, fileFilter);
 
+      // make the snapshot's family directory
+      Path snapshotFamilyDir = snapshotFamilyDirs.get(i);
+      fs.mkdirs(snapshotFamilyDir);
+
       // if no hfiles, then we are done with this family
       if (hfiles == null || hfiles.length == 0) {
         LOG.debug("Not hfiles found for family: " + familyDir + ", skipping.");
         continue;
       }
 
-      // make the snapshot's family directory
-      Path snapshotFamilyDir = snapshotFamilyDirs.get(i);
-      fs.mkdirs(snapshotFamilyDir);
-
       // create a reference for each hfile
       for (FileStatus hfile : hfiles) {
         // references are 0-length files, relying on file name.
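Note (not part of the patch): the two hunks above share one intent -- the snapshot-side store/family directory is now created unconditionally, before the "no hfiles" early-out, so an empty store still leaves an (empty) directory in the snapshot layout instead of being skipped. A minimal, self-contained sketch of that contract using only the plain Hadoop FileSystem API; the class, method, and path names below are illustrative assumptions, not HBase internals:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class EmptyFamilyDirSketch {
  /**
   * Mirror a family directory into a snapshot: always create the destination
   * directory first, then add one 0-length reference file per hfile.
   * Returns the number of references written (0 for an empty family).
   */
  static int referenceFamily(FileSystem fs, Path srcFamilyDir, Path snapshotFamilyDir)
      throws IOException {
    // create the snapshot family dir even if the family turns out to be empty
    if (!fs.mkdirs(snapshotFamilyDir)) {
      throw new IOException("Could not create " + snapshotFamilyDir);
    }
    FileStatus[] hfiles = fs.exists(srcFamilyDir) ? fs.listStatus(srcFamilyDir) : null;
    if (hfiles == null || hfiles.length == 0) {
      return 0; // empty family: directory exists, no references
    }
    for (FileStatus hfile : hfiles) {
      // references are 0-length files, relying only on the file name
      fs.createNewFile(new Path(snapshotFamilyDir, hfile.getPath().getName()));
    }
    return hfiles.length;
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path src = new Path("/tmp/demo/region/fam");            // hypothetical source family
    Path dst = new Path("/tmp/demo/.snapshot/region/fam");  // hypothetical snapshot target
    System.out.println("references written: " + referenceFamily(fs, src, dst));
  }
}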
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
index 581d980..e164d21 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java
@@ -29,10 +29,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -252,4 +252,65 @@ public class TestSnapshotFromClient {
       LOG.info("Correctly failed to snapshot a non-existant table:" + e.getMessage());
     }
   }
+
+  @Test (timeout=300000)
+  public void testOfflineTableSnapshotWithEmptyRegions() throws Exception {
+    doTestTableSnapshotWithEmptyRegions(true);
+  }
+
+  @Test (timeout=300000)
+  public void testOnlineTableSnapshotWithEmptyRegions() throws Exception {
+    doTestTableSnapshotWithEmptyRegions(false);
+  }
+
+  private void doTestTableSnapshotWithEmptyRegions(boolean isOffline) throws Exception {
+    // test with an empty table with one region
+
+    HBaseAdmin admin = UTIL.getHBaseAdmin();
+    // make sure we don't fail on listing snapshots
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+
+    // get the name of all the regionservers hosting the snapshotted table
+    Set<String> snapshotServers = new HashSet<String>();
+    List<RegionServerThread> servers = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads();
+    for (RegionServerThread server : servers) {
+      if (server.getRegionServer().getOnlineRegions(TABLE_NAME).size() > 0) {
+        snapshotServers.add(server.getRegionServer().getServerName().toString());
+      }
+    }
+
+    if (isOffline) {
+      LOG.debug("FS state before disable:");
+      FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+        FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+      admin.disableTable(TABLE_NAME);
+    }
+
+    LOG.debug("FS state before snapshot:");
+    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+    // take a snapshot of the disabled table
+    byte[] snapshot = Bytes.toBytes("testOfflineTableSnapshotWithEmptyRegions");
+    admin.snapshot(snapshot, TABLE_NAME);
+    LOG.debug("Snapshot completed.");
+
+    // make sure we have the snapshot
+    List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
+      snapshot, TABLE_NAME);
+
+    // make sure its a valid snapshot
+    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
+    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
+    LOG.debug("FS state after snapshot:");
+    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
+      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
+
+    SnapshotTestingUtils.confirmSnapshotValid(snapshots.get(0), TABLE_NAME, TEST_FAM, rootDir,
+      admin, fs, false, new Path(rootDir, HConstants.HREGION_LOGDIR_NAME), true, snapshotServers);
+
+    admin.deleteSnapshot(snapshot);
+    snapshots = admin.listSnapshots();
+    SnapshotTestingUtils.assertNoSnapshots(admin);
+  }
 }
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
index afba048..e7ce28b 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java
@@ -34,12 +34,12 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -210,11 +210,12 @@ public class SnapshotTestingUtils {
       HRegionInfo snapshotRegionInfo = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
       assertEquals(info, snapshotRegionInfo);
 
+      Path familyDir = new Path(regionDir, Bytes.toString(testFamily));
+      assertTrue("Expected to find: " + familyDir + ", but it doesn't exist",
+        fs.exists(familyDir));
+
       // check to make sure we have the family
       if (!familyEmpty) {
-        Path familyDir = new Path(regionDir, Bytes.toString(testFamily));
-        assertTrue("Expected to find: " + familyDir + ", but it doesn't exist",
-          fs.exists(familyDir));
         // make sure we have some files references
         assertTrue(fs.listStatus(familyDir).length > 0);
       }
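Note (not part of the patch): the test and SnapshotTestingUtils changes check the same invariant from the verification side -- every snapshotted region must contain the family directory, and only non-empty families are additionally required to hold reference files. A standalone sketch of that check, again in plain Hadoop APIs with hypothetical paths rather than the HBase test utilities:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotFamilyDirCheck {
  /**
   * For every region directory under a snapshot, require that the expected
   * family directory exists; only when the family is known to be non-empty
   * do we additionally require at least one reference file inside it.
   */
  static void checkFamilyDirs(FileSystem fs, Path snapshotDir, String family,
      boolean familyEmpty) throws IOException {
    for (FileStatus region : fs.listStatus(snapshotDir)) {
      if (!region.isDirectory()) continue;
      Path familyDir = new Path(region.getPath(), family);
      if (!fs.exists(familyDir)) {
        throw new IOException("Expected to find: " + familyDir + ", but it doesn't exist");
      }
      if (!familyEmpty && fs.listStatus(familyDir).length == 0) {
        throw new IOException("Expected references under: " + familyDir);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // build a tiny demo layout: <snapshotDir>/<region>/<family>/ with an empty family
    Path snapshotDir = new Path("/tmp/demo-snapshot");   // hypothetical path
    fs.mkdirs(new Path(snapshotDir, "region-a/fam"));
    checkFamilyDirs(fs, snapshotDir, "fam", true);       // passes: dir exists, may be empty
    System.out.println("family directories look sane");
  }
}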