diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index 14f0260..bb5784f 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -19,7 +19,7 @@
 
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.io.hfile.TestHFileWriterV2;
 import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -63,7 +62,7 @@ import org.junit.runners.Parameterized.Parameters;
 
 /**
  * Tests {@link HFile} cache-on-write functionality for data blocks, non-root
- * index blocks, and Bloom filter blocks, as specified by the column family. 
+ * index blocks, and Bloom filter blocks, as specified by the column family.
  */
 @RunWith(Parameterized.class)
 @Category(MediumTests.class)
@@ -120,7 +119,9 @@ public class TestCacheOnWriteInSchema {
   private final CacheOnWriteType cowType;
   private Configuration conf;
   private final String testDescription;
+  private HRegion region;
   private HStore store;
+  private HLog hlog;
   private FileSystem fs;
 
   public TestCacheOnWriteInSchema(CacheOnWriteType cowType) {
@@ -161,13 +162,15 @@ public class TestCacheOnWriteInSchema {
     Path oldLogDir = new Path(basedir, HConstants.HREGION_OLDLOGDIR_NAME);
     fs.delete(logdir, true);
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
-    HRegion region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
+    hlog = new HLog(fs, logdir, oldLogDir, conf);
+    region = new HRegion(basedir, hlog, fs, conf, info, htd, null);
     store = new HStore(basedir, region, hcd, fs, conf);
   }
 
   @After
-  public void tearDown() {
+  public void tearDown() throws IOException {
+    region.close();
+    hlog.close();
     try {
       fs.delete(new Path(DIR), true);
     } catch (IOException e) {
@@ -187,7 +190,7 @@ public class TestCacheOnWriteInSchema {
   }
 
   private void readStoreFile(Path path) throws IOException {
-    CacheConfig cacheConf = store.getCacheConfig(); 
+    CacheConfig cacheConf = store.getCacheConfig();
     BlockCache cache = cacheConf.getBlockCache();
     StoreFile sf = new StoreFile(fs, path, conf, cacheConf, BloomType.ROWCOL,
         null);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
index 9e49dc8..e2bc684 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactSelection.java
@@ -32,15 +32,20 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder;
-import org.apache.hadoop.hbase.regionserver.compactions.CompactSelection;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Lists;
-import org.junit.experimental.categories.Category;
 
 @Category(SmallTests.class)
 public class TestCompactSelection extends TestCase {
@@ -59,6 +64,8 @@ public class TestCompactSelection extends TestCase {
 
   private static final long minSize = 10;
   private static final long maxSize = 1000;
+  private HLog hlog;
+  private HRegion region;
 
   @Override
   public void setUp() throws Exception {
@@ -84,8 +91,8 @@
     htd.addFamily(hcd);
 
     HRegionInfo info = new HRegionInfo(htd.getName(), null, null, false);
-    HLog hlog = new HLog(fs, logdir, oldLogDir, conf);
-    HRegion region = HRegion.createHRegion(info, basedir, conf, htd);
+    hlog = new HLog(fs, logdir, oldLogDir, conf);
+    region = HRegion.createHRegion(info, basedir, conf, htd);
     HRegion.closeHRegion(region);
     Path tableDir = new Path(basedir, Bytes.toString(htd.getName()));
     region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
@@ -95,6 +102,12 @@
     fs.create(TEST_FILE);
   }
 
+  @After
+  public void tearDown() throws IOException {
+    region.close();
+    hlog.closeAndDelete();
+  }
+
   // used so our tests don't deal with actual StoreFiles
   static class MockStoreFile extends StoreFile {
     long length = 0;
@@ -155,13 +168,13 @@
     }
     return aNums;
   }
-  
-  void compactEquals(List<StoreFile> candidates, long ... expected) 
+
+  void compactEquals(List<StoreFile> candidates, long ... expected)
   throws IOException {
     compactEquals(candidates, false, expected);
   }
 
-  void compactEquals(List<StoreFile> candidates, boolean forcemajor, 
+  void compactEquals(List<StoreFile> candidates, boolean forcemajor,
       long ... expected)
   throws IOException {
     store.forceMajor = forcemajor;
@@ -188,7 +201,7 @@
     compactEquals(sfCreate(tooBig, tooBig, 700,700) /* empty */);
     // small files = don't care about ratio
     compactEquals(sfCreate(8,3,1), 8,3,1);
-    /* TODO: add sorting + unit test back in when HBASE-2856 is fixed 
+    /* TODO: add sorting + unit test back in when HBASE-2856 is fixed
     // sort first so you don't include huge file the tail end
     // happens with HFileOutputFormat bulk migration
     compactEquals(sfCreate(100,50,23,12,12, 500), 23, 12, 12);
@@ -198,7 +211,7 @@
       store.compactSelection(sfCreate(7,6,5,4,3,2,1)).getFilesToCompact().size());
     // note: file selection starts with largest to smallest.
     compactEquals(sfCreate(7, 6, 5, 4, 3, 2, 1), 7, 6, 5, 4, 3);
-    
+
     /* MAJOR COMPACTION */
     // if a major compaction has been forced, then compact everything
     compactEquals(sfCreate(50,25,12,12), true, 50, 25, 12, 12);
@@ -237,7 +250,7 @@
      store.compactSelection(sfCreate(true, 7,6,5,4,3,2,1)).getFilesToCompact().size());
    // reference compaction
    compactEquals(sfCreate(true, 7, 6, 5, 4, 3, 2, 1), 5, 4, 3, 2, 1);
-    
+
    // empty case
    compactEquals(new ArrayList<StoreFile>() /* empty */);
    // empty case (because all files are too big)