Index: src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java (revision 1546419)
+++ src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java (working copy)
@@ -29,7 +29,6 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
@@ -107,30 +106,27 @@
     admin.snapshot(emptySnapshot, tableName);
 
     HTable table = new HTable(TEST_UTIL.getConfiguration(), tableName);
-    try {
-      // enable table and insert data
-      admin.enableTable(tableName);
-      SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, FAMILY);
-      snapshot0Rows = TEST_UTIL.countRows(table);
-      admin.disableTable(tableName);
+    // enable table and insert data
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, FAMILY);
+    snapshot0Rows = TEST_UTIL.countRows(table);
+    admin.disableTable(tableName);
 
-      // take a snapshot
-      admin.snapshot(snapshotName0, tableName);
+    // take a snapshot
+    admin.snapshot(snapshotName0, tableName);
 
-      // enable table and insert more data
-      admin.enableTable(tableName);
-      SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, FAMILY);
-      snapshot1Rows = TEST_UTIL.countRows(table);
-      admin.disableTable(tableName);
+    // enable table and insert more data
+    admin.enableTable(tableName);
+    SnapshotTestingUtils.loadData(TEST_UTIL, table, 500, FAMILY);
+    snapshot1Rows = TEST_UTIL.countRows(table);
+    admin.disableTable(tableName);
 
-      // take a snapshot of the updated table
-      admin.snapshot(snapshotName1, tableName);
+    // take a snapshot of the updated table
+    admin.snapshot(snapshotName1, tableName);
 
-      // re-enable table
-      admin.enableTable(tableName);
-    } finally {
-      table.close();
-    }
+    // re-enable table
+    admin.enableTable(tableName);
+    table.close();
   }
 
   @After
Index: src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
===================================================================
--- src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java (revision 1546419)
+++ src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java (working copy)
@@ -17,30 +17,19 @@
  */
 package org.apache.hadoop.hbase.snapshot;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 import java.io.IOException;
-import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.LargeTests;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.MD5Hash;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -61,7 +50,6 @@
   private final static HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
   private final byte[] FAMILY = Bytes.toBytes("cf");
-  private static final byte[] TEST_QUAL = Bytes.toBytes("q");
 
   private byte[] snapshotName0;
   private byte[] snapshotName1;
@@ -109,33 +97,30 @@
     // create Table and disable it
     SnapshotTestingUtils.createTable(UTIL, tableName, FAMILY);
     HTable table = new HTable(UTIL.getConfiguration(), tableName);
-    try {
-      SnapshotTestingUtils.loadData(UTIL, table, 500, FAMILY);
-      snapshot0Rows = UTIL.countRows(table);
-      LOG.info("=== before snapshot with 500 rows");
-      logFSTree();
+    SnapshotTestingUtils.loadData(UTIL, table, 500, FAMILY);
+    snapshot0Rows = UTIL.countRows(table);
+    LOG.info("=== before snapshot with 500 rows");
+    logFSTree();
 
-      // take a snapshot
-      admin.snapshot(Bytes.toString(snapshotName0), Bytes.toString(tableName),
-          SnapshotDescription.Type.FLUSH);
+    // take a snapshot
+    admin.snapshot(Bytes.toString(snapshotName0), Bytes.toString(tableName),
+        SnapshotDescription.Type.FLUSH);
 
-      LOG.info("=== after snapshot with 500 rows");
-      logFSTree();
+    LOG.info("=== after snapshot with 500 rows");
+    logFSTree();
 
-      // insert more data
-      SnapshotTestingUtils.loadData(UTIL, table, 500, FAMILY);
-      snapshot1Rows = UTIL.countRows(table);
-      LOG.info("=== before snapshot with 1000 rows");
-      logFSTree();
+    // insert more data
+    SnapshotTestingUtils.loadData(UTIL, table, 500, FAMILY);
+    snapshot1Rows = UTIL.countRows(table);
+    LOG.info("=== before snapshot with 1000 rows");
+    logFSTree();
 
-      // take a snapshot of the updated table
-      admin.snapshot(Bytes.toString(snapshotName1), Bytes.toString(tableName),
-          SnapshotDescription.Type.FLUSH);
-      LOG.info("=== after snapshot with 1000 rows");
-      logFSTree();
-    } finally {
-      table.close();
-    }
+    // take a snapshot of the updated table
+    admin.snapshot(Bytes.toString(snapshotName1), Bytes.toString(tableName),
+        SnapshotDescription.Type.FLUSH);
+    LOG.info("=== after snapshot with 1000 rows");
+    logFSTree();
+    table.close();
   }
 
   @After