diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 25f9727..21e6daa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -431,21 +431,15 @@ public class CacheConfig {
* Static reference to the block cache, or null if no caching should be used
* at all.
*/
- // Clear this if in tests you'd make more than one block cache instance.
@VisibleForTesting
static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE;
- /** Boolean whether we have disabled the block cache entirely. */
- @VisibleForTesting
- static boolean blockCacheDisabled = false;
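+ /** Block cache is treated as disabled when configured at or below this fraction of the heap. */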
+ static final double MINIMUM_CACHE_PERCENTAGE = 0.0001;
static long getLruCacheSize(final Configuration conf, final MemoryUsage mu) {
float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
- if (cachePercentage <= 0.0001f) {
- blockCacheDisabled = true;
- return -1;
- }
+ if (!isBlockCacheSizeAboveMinimumPercentage(cachePercentage)) return -1;
if (cachePercentage > 1.0) {
throw new IllegalArgumentException(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY +
" must be between 0.0 and 1.0, and not > 1.0");
@@ -456,16 +450,38 @@ public class CacheConfig {
}
/**
+ * Whether the block cache should be enabled.
+ * @param c Configuration to read the block cache size from.
+ * @return True if we should enable the block cache.
+ */
+ private static boolean enableBlockCache(final Configuration c) {
+ // Currently the block cache is enabled only when HFILE_BLOCK_CACHE_SIZE_KEY
+ // is set to a value above MINIMUM_CACHE_PERCENTAGE.
+ return isBlockCacheSizeAboveMinimumPercentage(c.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
+ HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT));
+ }
+
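+ /**
+  * @param p Configured block cache size expressed as a fraction of the heap.
+  * @return True if {@code p} is above MINIMUM_CACHE_PERCENTAGE, i.e. large enough to enable the cache.
+  */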
+ static boolean isBlockCacheSizeAboveMinimumPercentage(final double p) {
+ return p > MINIMUM_CACHE_PERCENTAGE;
+ }
+
+ /**
* @param c Configuration to use.
* @param mu JMX Memory Bean
* @return An L1 instance. Currently an instance of LruBlockCache.
*/
private static LruBlockCache getL1(final Configuration c, final MemoryUsage mu) {
long lruCacheSize = getLruCacheSize(c, mu);
+ if (lruCacheSize == -1) {
+   LOG.info("L1 cache is disabled. Set " +
+     HConstants.HFILE_BLOCK_CACHE_SIZE_KEY + " to a value above " +
+     MINIMUM_CACHE_PERCENTAGE + " to enable it");
+   return null;
+ }
int blockSize = c.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE);
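+ // The background eviction thread can be disabled (e.g. by tests) via LRU_EVICTION_THREAD_KEY.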
+ boolean evictionThread = c.getBoolean(LruBlockCache.LRU_EVICTION_THREAD_KEY, true);
LOG.info("Allocating LruBlockCache size=" +
StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
- return new LruBlockCache(lruCacheSize, blockSize, true, c);
+ return new LruBlockCache(lruCacheSize, blockSize, evictionThread, c);
}
/**
@@ -523,27 +539,23 @@ public class CacheConfig {
* @return The block cache or null.
*/
public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
- if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
- if (blockCacheDisabled) return null;
+ if (!enableBlockCache(conf)) return null;
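+ // Each invocation now builds a fresh block cache rather than reusing GLOBAL_BLOCK_CACHE_INSTANCE.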
MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
LruBlockCache l1 = getL1(conf, mu);
BucketCache l2 = getL2(conf, mu);
- if (l2 == null) {
- GLOBAL_BLOCK_CACHE_INSTANCE = l1;
- } else {
+ if (l2 != null) {
boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
DEFAULT_BUCKET_CACHE_COMBINED);
if (combinedWithLru) {
- GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2);
+ return new CombinedBlockCache(l1, l2);
} else {
// L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler
// mechanism. It is a little ugly but works according to the following: when the
// background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get
// a block from the L1 cache, if not in L1, we will search L2.
l1.setVictimCache(l2);
- GLOBAL_BLOCK_CACHE_INSTANCE = l1;
}
}
- return GLOBAL_BLOCK_CACHE_INSTANCE;
+ return l1;
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index a5ffaa2..c5d64a1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -113,6 +113,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
static final String LRU_SINGLE_PERCENTAGE_CONFIG_NAME = "hbase.lru.blockcache.single.percentage";
static final String LRU_MULTI_PERCENTAGE_CONFIG_NAME = "hbase.lru.blockcache.multi.percentage";
static final String LRU_MEMORY_PERCENTAGE_CONFIG_NAME = "hbase.lru.blockcache.memory.percentage";
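+ /** Set to false to run the cache without a background eviction thread (used by tests). */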
+ public static final String LRU_EVICTION_THREAD_KEY = "hbase.lru.blockcache.evictionThread";
/**
* Configuration key to force data-block always (except in-memory are too much)
@@ -290,7 +291,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
this.elements = new AtomicLong(0);
this.overhead = calculateOverhead(maxSize, blockSize, mapConcurrencyLevel);
this.size = new AtomicLong(this.overhead);
- if(evictionThread) {
+ if (evictionThread) {
this.evictionThread = new EvictionThread(this);
this.evictionThread.start(); // FindBugs SC_START_IN_CTOR
} else {
@@ -910,8 +911,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
}
public void shutdown() {
- if (victimHandler != null)
- victimHandler.shutdown();
+ if (this.victimHandler != null) this.victimHandler.shutdown();
this.scheduleThreadPool.shutdown();
for (int i = 0; i < 10; i++) {
if (!this.scheduleThreadPool.isShutdown()) {
@@ -929,7 +929,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
List runnables = this.scheduleThreadPool.shutdownNow();
LOG.debug("Still running " + runnables);
}
- this.evictionThread.shutdown();
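+ // The eviction thread is null when the cache was constructed with evictionThread == false.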
+ if (this.evictionThread != null) this.evictionThread.shutdown();
}
/** Clears the cache. Used in tests. */
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
index bbf385b..918995f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -44,14 +44,11 @@ public class TestBlockCacheReporting {
@Before
public void setUp() throws Exception {
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
this.conf = HBaseConfiguration.create();
}
@After
public void tearDown() throws Exception {
- // Let go of current block cache.
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
}
private void addDataAndHits(final BlockCache bc, final int count) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index 156bfc1..dc3829b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -141,14 +141,11 @@ public class TestCacheConfig {
@Before
public void setUp() throws Exception {
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
this.conf = HBaseConfiguration.create();
}
@After
public void tearDown() throws Exception {
- // Let go of current block cache.
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
}
/**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
index 7ed3959..51edd19 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -95,8 +95,6 @@ public class TestForceCacheImportantBlocks {
@Before
public void setup() {
- // Make sure we make a new one each time.
- CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
HFile.dataBlockReadCnt.set(0);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
index 6e0a2ca..52aabf6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
@@ -57,7 +57,6 @@ public class TestPrefetch {
conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
fs = HFileSystem.get(conf);
- CacheConfig.blockCacheDisabled = false;
cacheConf = new CacheConfig(conf);
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
index 68dcc0f..36cb069 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotManager.java
@@ -49,7 +49,6 @@ import org.mockito.Mockito;
@Category(SmallTests.class)
public class TestSnapshotManager {
private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
MasterServices services = Mockito.mock(MasterServices.class);
MetricsMaster metrics = Mockito.mock(MetricsMaster.class);
ProcedureCoordinator coordinator = Mockito.mock(ProcedureCoordinator.class);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 115926e..e105767 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl;
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
+import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
import org.apache.hadoop.hbase.util.BloomFilterFactory;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ChecksumType;
@@ -71,20 +72,26 @@ import com.google.common.collect.Lists;
public class TestStoreFile extends HBaseTestCase {
static final Log LOG = LogFactory.getLog(TestStoreFile.class);
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
- private CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
private static String ROOT_DIR = TEST_UTIL.getDataTestDir("TestStoreFile").toString();
private static final ChecksumType CKTYPE = ChecksumType.CRC32;
private static final int CKBYTES = 512;
private static String TEST_FAMILY = "cf";
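+ // CacheConfig rebuilt for each test in setUp; its block cache runs without the eviction thread.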
+ private CacheConfig cc;
@Override
public void setUp() throws Exception {
- super.setUp();
+ Configuration c = new Configuration(TEST_UTIL.getConfiguration());
+ this.fs = FileSystem.get(c);
+ this.testDir = TEST_UTIL.getDataTestDir();
+ // Turn off the eviction thread; background evictions would complicate the cache-stat asserts below.
+ c.setBoolean(LruBlockCache.LRU_EVICTION_THREAD_KEY, false);
+ this.cc = new CacheConfig(c);
}
@Override
public void tearDown() throws Exception {
- super.tearDown();
+ if (this.cc.getBlockCache() != null) this.cc.getBlockCache().shutdown();
+ this.cc = null;
}
/**
@@ -99,15 +106,14 @@ public class TestStoreFile extends HBaseTestCase {
conf, fs, new Path(this.testDir, hri.getTable().getNameAsString()), hri);
HFileContext meta = new HFileContextBuilder().withBlockSize(2*1024).build();
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withFilePath(regionFs.createTempName())
.withFileContext(meta)
.build();
writeStoreFile(writer);
Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
- StoreFile sf = new StoreFile(this.fs, sfPath, conf, cacheConf,
- BloomType.NONE);
+ StoreFile sf = new StoreFile(this.fs, sfPath, conf, cc, BloomType.NONE);
checkHalfHFile(regionFs, sf);
}
@@ -151,15 +157,14 @@ public class TestStoreFile extends HBaseTestCase {
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withFilePath(regionFs.createTempName())
.withFileContext(meta)
.build();
writeStoreFile(writer);
Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
- StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cacheConf,
- BloomType.NONE);
+ StoreFile hsf = new StoreFile(this.fs, hsfPath, conf, cc, BloomType.NONE);
StoreFile.Reader reader = hsf.createReader();
// Split on a row, not in middle of row. Midkey returned by reader
// may be in middle of row. Create new one with empty column and
@@ -171,8 +176,7 @@ public class TestStoreFile extends HBaseTestCase {
// Make a reference
HRegionInfo splitHri = new HRegionInfo(hri.getTable(), null, midRow);
Path refPath = splitStoreFile(regionFs, splitHri, TEST_FAMILY, hsf, midRow, true);
- StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf,
- BloomType.NONE);
+ StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cc, BloomType.NONE);
// Now confirm that I can read from the reference and that it only gets
// keys from top half of the file.
HFileScanner s = refHsf.createReader().getScanner(false, false);
@@ -197,7 +201,7 @@ public class TestStoreFile extends HBaseTestCase {
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withFilePath(regionFs.createTempName())
.withFileContext(meta)
.build();
@@ -211,8 +215,7 @@ public class TestStoreFile extends HBaseTestCase {
// Try to open store file from link
StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
- StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf,
- BloomType.NONE);
+ StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cc, BloomType.NONE);
assertTrue(storeFileInfo.isLink());
// Now confirm that I can read from the link
@@ -241,7 +244,7 @@ public class TestStoreFile extends HBaseTestCase {
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it. ////
- StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cc, this.fs)
.withFilePath(regionFs.createTempName())
.withFileContext(meta)
.build();
@@ -263,7 +266,7 @@ public class TestStoreFile extends HBaseTestCase {
// /clone/splitB//
HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
- StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
+ StoreFile f = new StoreFile(fs, linkFilePath, testConf, cc, BloomType.NONE);
Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
@@ -274,8 +277,7 @@ public class TestStoreFile extends HBaseTestCase {
// reference to a hfile link. This code in StoreFile that handles this case.
// Try to open store file from link
- StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
- BloomType.NONE);
+ StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cc, BloomType.NONE);
// Now confirm that I can read from the ref to link
int count = 1;
@@ -287,8 +289,7 @@ public class TestStoreFile extends HBaseTestCase {
assertTrue(count > 0); // read some rows here
// Try to open store file from link
- StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
- BloomType.NONE);
+ StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cc, BloomType.NONE);
// Now confirm that I can read from the ref to link
HFileScanner sB = hsfB.createReader().getScanner(false, false);
@@ -319,9 +320,9 @@ public class TestStoreFile extends HBaseTestCase {
Path bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, midRow, false);
// Make readers on top and bottom.
StoreFile.Reader top = new StoreFile(
- this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
+ this.fs, topPath, conf, cc, BloomType.NONE).createReader();
StoreFile.Reader bottom = new StoreFile(
- this.fs, bottomPath, conf, cacheConf, BloomType.NONE).createReader();
+ this.fs, bottomPath, conf, cc, BloomType.NONE).createReader();
ByteBuffer previous = null;
LOG.info("Midkey: " + midKV.toString());
ByteBuffer bbMidkeyBytes = ByteBuffer.wrap(midkey);
@@ -379,7 +380,7 @@ public class TestStoreFile extends HBaseTestCase {
assertNull(bottomPath);
- top = new StoreFile(this.fs, topPath, conf, cacheConf, BloomType.NONE).createReader();
+ top = new StoreFile(this.fs, topPath, conf, cc, BloomType.NONE).createReader();
// Now read from the top.
first = true;
topScanner = top.getScanner(false, false);
@@ -413,8 +414,7 @@ public class TestStoreFile extends HBaseTestCase {
topPath = splitStoreFile(regionFs,topHri, TEST_FAMILY, f, badmidkey, true);
bottomPath = splitStoreFile(regionFs, bottomHri, TEST_FAMILY, f, badmidkey, false);
assertNull(topPath);
- bottom = new StoreFile(this.fs, bottomPath, conf, cacheConf,
- BloomType.NONE).createReader();
+ bottom = new StoreFile(this.fs, bottomPath, conf, cc, BloomType.NONE).createReader();
first = true;
bottomScanner = bottom.getScanner(false, false);
while ((!bottomScanner.isSeeked() && bottomScanner.seekTo()) ||
@@ -460,7 +460,7 @@ public class TestStoreFile extends HBaseTestCase {
}
writer.close();
- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cc, conf);
reader.loadFileInfo();
reader.loadBloomfilter();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -504,7 +504,7 @@ public class TestStoreFile extends HBaseTestCase {
.withChecksumType(CKTYPE)
.withBytesPerCheckSum(CKBYTES).build();
// Make a store file and write data to it.
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withFilePath(f)
.withBloomType(BloomType.ROW)
.withMaxKeyCount(2000)
@@ -527,7 +527,7 @@ public class TestStoreFile extends HBaseTestCase {
.withChecksumType(CKTYPE)
.withBytesPerCheckSum(CKBYTES).build();
// Make a store file and write data to it.
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withFilePath(f)
.withMaxKeyCount(2000)
.withFileContext(meta)
@@ -543,7 +543,7 @@ public class TestStoreFile extends HBaseTestCase {
}
writer.close();
- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cc, conf);
reader.loadFileInfo();
reader.loadBloomfilter();
@@ -580,7 +580,7 @@ public class TestStoreFile extends HBaseTestCase {
Path f = new Path(ROOT_DIR, getName());
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withFilePath(f)
.withFileContext(meta)
.build();
@@ -588,7 +588,7 @@ public class TestStoreFile extends HBaseTestCase {
writeStoreFile(writer);
writer.close();
- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cc, conf);
// Now do reseek with empty KV to position to the beginning of the file
@@ -625,7 +625,7 @@ public class TestStoreFile extends HBaseTestCase {
.withChecksumType(CKTYPE)
.withBytesPerCheckSum(CKBYTES).build();
// Make a store file and write data to it.
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withFilePath(f)
.withBloomType(bt[x])
.withMaxKeyCount(expKeys[x])
@@ -647,7 +647,7 @@ public class TestStoreFile extends HBaseTestCase {
}
writer.close();
- StoreFile.Reader reader = new StoreFile.Reader(fs, f, cacheConf, conf);
+ StoreFile.Reader reader = new StoreFile.Reader(fs, f, cc, conf);
reader.loadFileInfo();
reader.loadBloomfilter();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -777,7 +777,7 @@ public class TestStoreFile extends HBaseTestCase {
Path dir = new Path(storedir, "1234567890");
HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
// Make a store file and write data to it.
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withOutputDir(dir)
.withFileContext(meta)
.build();
@@ -791,7 +791,7 @@ public class TestStoreFile extends HBaseTestCase {
writer.appendMetadata(0, false);
writer.close();
- StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
+ StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cc,
BloomType.NONE);
StoreFile.Reader reader = hsf.createReader();
StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -816,29 +816,27 @@ public class TestStoreFile extends HBaseTestCase {
}
public void testCacheOnWriteEvictOnClose() throws Exception {
- Configuration conf = this.conf;
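+ // Number of blocks written into each store file in this test.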
+ final int fileBlockCount = 3;
// Find a home for our files (regiondir ("7e0102") and familyname).
- Path baseDir = new Path(new Path(this.testDir, "7e0102"),"twoCOWEOC");
+ Path baseDir = new Path(new Path(TEST_UTIL.getDataTestDir(), "7e0102"),"twoCOWEOC");
+ // Let's write a StoreFile with fileBlockCount blocks, with cache on write off
+ this.cc.setCacheDataOnWrite(false);
+ Path pathCowOff = new Path(baseDir, "123456789");
// Grab the block cache and get the initial hit/miss counts
- BlockCache bc = new CacheConfig(conf).getBlockCache();
+ BlockCache bc = cc.getBlockCache();
assertNotNull(bc);
CacheStats cs = bc.getStats();
long startHit = cs.getHitCount();
long startMiss = cs.getMissCount();
long startEvicted = cs.getEvictedCount();
- // Let's write a StoreFile with three blocks, with cache on write off
- conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
- CacheConfig cacheConf = new CacheConfig(conf);
- Path pathCowOff = new Path(baseDir, "123456789");
- StoreFile.Writer writer = writeStoreFile(conf, cacheConf, pathCowOff, 3);
- StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
- BloomType.NONE);
+ StoreFile.Writer writer = writeStoreFile(conf, cc, pathCowOff, fileBlockCount);
+ StoreFile hsf = new StoreFile(this.fs, writer.getPath(), conf, cc, BloomType.NONE);
LOG.debug(hsf.getPath().toString());
- // Read this file, we should see 3 misses
+ // Read this file, we should see fileBlockCount misses
StoreFile.Reader reader = hsf.createReader();
reader.loadFileInfo();
StoreFileScanner scanner = reader.getStoreFileScanner(true, true);
@@ -849,37 +847,39 @@ public class TestStoreFile extends HBaseTestCase {
assertEquals(startEvicted, cs.getEvictedCount());
startMiss += 3;
scanner.close();
- reader.close(cacheConf.shouldEvictOnClose());
+ reader.close(cc.shouldEvictOnClose());
+ assertEquals(startEvicted + (cc.shouldEvictOnClose()? fileBlockCount: 0),
+   cs.getEvictedCount());
+ startEvicted = cs.getEvictedCount();
- // Now write a StoreFile with three blocks, with cache on write on
- conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
- cacheConf = new CacheConfig(conf);
+ // Now write a StoreFile with three blocks, with cache on write ON
+ cc.setCacheDataOnWrite(true);
Path pathCowOn = new Path(baseDir, "123456788");
- writer = writeStoreFile(conf, cacheConf, pathCowOn, 3);
- hsf = new StoreFile(this.fs, writer.getPath(), conf, cacheConf,
- BloomType.NONE);
+ writer = writeStoreFile(conf, cc, pathCowOn, fileBlockCount);
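+ // Re-baseline the eviction count in case caching the new blocks on write evicted older ones.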
+ startEvicted = cs.getEvictedCount();
+ hsf = new StoreFile(this.fs, writer.getPath(), conf, cc, BloomType.NONE);
- // Read this file, we should see 3 hits
+ // Read this file, we should see fileBlockCount hits because blocks already in cache.
reader = hsf.createReader();
scanner = reader.getStoreFileScanner(true, true);
scanner.seek(KeyValue.LOWESTKEY);
while (scanner.next() != null);
- assertEquals(startHit + 3, cs.getHitCount());
+ assertEquals(startHit + fileBlockCount, cs.getHitCount()); // Blocks were cached on write so we get hits, not misses
assertEquals(startMiss, cs.getMissCount());
assertEquals(startEvicted, cs.getEvictedCount());
startHit += 3;
scanner.close();
- reader.close(cacheConf.shouldEvictOnClose());
+ reader.close(cc.shouldEvictOnClose());
+ assertEquals(startEvicted + (cc.shouldEvictOnClose()? fileBlockCount: 0),
+   cs.getEvictedCount());
- // Let's read back the two files to ensure the blocks exactly match
- hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
- BloomType.NONE);
+ // Let's read back the two files to ensure the blocks exactly match.
+ hsf = new StoreFile(this.fs, pathCowOff, conf, cc, BloomType.NONE);
StoreFile.Reader readerOne = hsf.createReader();
readerOne.loadFileInfo();
StoreFileScanner scannerOne = readerOne.getStoreFileScanner(true, true);
scannerOne.seek(KeyValue.LOWESTKEY);
- hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
- BloomType.NONE);
+ hsf = new StoreFile(this.fs, pathCowOn, conf, cc, BloomType.NONE);
StoreFile.Reader readerTwo = hsf.createReader();
readerTwo.loadFileInfo();
StoreFileScanner scannerTwo = readerTwo.getStoreFileScanner(true, true);
@@ -899,36 +899,30 @@ public class TestStoreFile extends HBaseTestCase {
kv2.getValueArray(), kv2.getValueOffset(), kv2.getValueLength()) == 0);
}
assertNull(scannerTwo.next());
- assertEquals(startHit + 6, cs.getHitCount());
+ assertEquals(startHit + (fileBlockCount * 2), cs.getHitCount());
assertEquals(startMiss, cs.getMissCount());
assertEquals(startEvicted, cs.getEvictedCount());
- startHit += 6;
+ startHit += (fileBlockCount * 2);
scannerOne.close();
- readerOne.close(cacheConf.shouldEvictOnClose());
+ readerOne.close(cc.shouldEvictOnClose());
scannerTwo.close();
- readerTwo.close(cacheConf.shouldEvictOnClose());
+ readerTwo.close(cc.shouldEvictOnClose());
// Let's close the first file with evict on close turned on
- conf.setBoolean("hbase.rs.evictblocksonclose", true);
- cacheConf = new CacheConfig(conf);
- hsf = new StoreFile(this.fs, pathCowOff, conf, cacheConf,
- BloomType.NONE);
+ cc.setEvictOnClose(true);
+ hsf = new StoreFile(this.fs, pathCowOff, conf, cc, BloomType.NONE);
reader = hsf.createReader();
- reader.close(cacheConf.shouldEvictOnClose());
+ reader.close(cc.shouldEvictOnClose());
- // We should have 3 new evictions
- assertEquals(startHit, cs.getHitCount());
- assertEquals(startMiss, cs.getMissCount());
- assertEquals(startEvicted + 3, cs.getEvictedCount());
- startEvicted += 3;
+ // Evict-on-close is on, so closing the reader should evict the file's fileBlockCount blocks
+ assertEquals(startEvicted + fileBlockCount, cs.getEvictedCount());
+ startEvicted += fileBlockCount;
// Let's close the second file with evict on close turned off
- conf.setBoolean("hbase.rs.evictblocksonclose", false);
- cacheConf = new CacheConfig(conf);
- hsf = new StoreFile(this.fs, pathCowOn, conf, cacheConf,
- BloomType.NONE);
+ cc.setEvictOnClose(false);
+ hsf = new StoreFile(this.fs, pathCowOn, conf, cc, BloomType.NONE);
reader = hsf.createReader();
- reader.close(cacheConf.shouldEvictOnClose());
+ reader.close(cc.shouldEvictOnClose());
// We expect no changes
assertEquals(startHit, cs.getHitCount());
@@ -951,7 +945,7 @@ public class TestStoreFile extends HBaseTestCase {
private StoreFile.Writer writeStoreFile(Configuration conf,
CacheConfig cacheConf, Path path, int numBlocks)
throws IOException {
- // Let's put ~5 small KVs in each block, so let's make 5*numBlocks KVs
+ // Let's put ~5 small KVs in each block, so let's make 5 * numBlocks KVs
int numKVs = 5 * numBlocks;
List kvs = new ArrayList(numKVs);
byte [] b = Bytes.toBytes("x");
@@ -989,30 +983,26 @@ public class TestStoreFile extends HBaseTestCase {
*/
public void testDataBlockEncodingMetaData() throws IOException {
// Make up a directory hierarchy that has a regiondir ("7e0102") and familyname.
- Path dir = new Path(new Path(this.testDir, "7e0102"), "familyname");
+ Path dir = new Path(new Path(this.testDir, "testDataBlockEncodingMetaData"), "familyname");
Path path = new Path(dir, "1234567890");
- DataBlockEncoding dataBlockEncoderAlgo =
- DataBlockEncoding.FAST_DIFF;
- HFileDataBlockEncoder dataBlockEncoder =
- new HFileDataBlockEncoderImpl(
- dataBlockEncoderAlgo);
- cacheConf = new CacheConfig(conf);
+ DataBlockEncoding dataBlockEncoderAlgo = DataBlockEncoding.FAST_DIFF;
+ HFileDataBlockEncoder dataBlockEncoder = new HFileDataBlockEncoderImpl(dataBlockEncoderAlgo);
+
HFileContext meta = new HFileContextBuilder().withBlockSize(BLOCKSIZE_SMALL)
.withChecksumType(CKTYPE)
.withBytesPerCheckSum(CKBYTES)
.withDataBlockEncoding(dataBlockEncoderAlgo)
.build();
// Make a store file and write data to it.
- StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
+ StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cc, this.fs)
.withFilePath(path)
.withMaxKeyCount(2000)
.withFileContext(meta)
.build();
writer.close();
- StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
- cacheConf, BloomType.NONE);
+ StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf, cc, BloomType.NONE);
StoreFile.Reader reader = storeFile.createReader();
Map fileInfo = reader.loadFileInfo();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestPoolMap.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestPoolMap.java
index bb958cf..34b892e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestPoolMap.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestPoolMap.java
@@ -24,10 +24,9 @@ import java.util.Random;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;
-import junit.framework.Test;
import junit.framework.TestCase;
-import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.util.PoolMap.PoolType;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
@@ -36,7 +35,8 @@ import org.junit.runners.Suite;
@RunWith(Suite.class)
@Suite.SuiteClasses({TestPoolMap.TestRoundRobinPoolType.class, TestPoolMap.TestThreadLocalPoolType.class,
TestPoolMap.TestReusablePoolType.class})
-@Category(SmallTests.class)
+// Medium because these tests use a lot of memory.
+@Category(MediumTests.class)
public class TestPoolMap {
public abstract static class TestPoolType extends TestCase {
protected PoolMap poolMap;
@@ -74,7 +74,7 @@ public class TestPoolMap {
}
}
- @Category(SmallTests.class)
+ @Category(MediumTests.class)
public static class TestRoundRobinPoolType extends TestPoolType {
@Override
protected PoolType getPoolType() {
@@ -136,7 +136,7 @@ public class TestPoolMap {
}
- @Category(SmallTests.class)
+ @Category(MediumTests.class)
public static class TestThreadLocalPoolType extends TestPoolType {
@Override
protected PoolType getPoolType() {
@@ -181,7 +181,7 @@ public class TestPoolMap {
}
- @Category(SmallTests.class)
+ @Category(MediumTests.class)
public static class TestReusablePoolType extends TestPoolType {
@Override
protected PoolType getPoolType() {
@@ -201,15 +201,15 @@ public class TestPoolMap {
ExecutionException {
// As long as we poll values we put, the pool size should remain zero
for (int i = 0; i < POOL_SIZE; i++) {
- String randomKey = String.valueOf(random.nextInt());
- String randomValue = String.valueOf(random.nextInt());
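+ // Bound keys and values to a small range so the test creates only a limited set of distinct strings.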
+ String randomKey = String.valueOf(random.nextInt() % 1024);
+ String randomValue = String.valueOf(random.nextInt() % 1024);
runThread(randomKey, randomValue, randomValue);
assertEquals(0, poolMap.size(randomKey));
}
poolMap.clear();
- String randomKey = String.valueOf(random.nextInt());
+ String randomKey = String.valueOf(random.nextInt() % 1024);
for (int i = 0; i < POOL_SIZE - 1; i++) {
- String randomValue = String.valueOf(random.nextInt());
+ String randomValue = String.valueOf(random.nextInt() % 1024);
runThread(randomKey, randomValue, randomValue);
assertEquals(0, poolMap.size(randomKey));
}
@@ -218,17 +218,14 @@ public class TestPoolMap {
public void testPoolCap() throws InterruptedException, ExecutionException {
// As long as we poll values we put, the pool size should remain zero
- String randomKey = String.valueOf(random.nextInt());
+ String randomKey = String.valueOf(random.nextInt() % 1024);
List randomValues = new ArrayList();
for (int i = 0; i < POOL_SIZE * 2; i++) {
- String randomValue = String.valueOf(random.nextInt());
+ String randomValue = String.valueOf(random.nextInt() % 1024);
randomValues.add(randomValue);
runThread(randomKey, randomValue, randomValue);
}
assertEquals(0, poolMap.size(randomKey));
}
-
}
-
}
-
diff --git a/pom.xml b/pom.xml
index 6b7b65e..0a06793 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1484,7 +1484,7 @@
org.apache.maven.plugins
maven-surefire-plugin
- -enableassertions -Xmx1900m -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true "-Djava.library.path=${hadoop.library.path};${java.library.path}"
+ -enableassertions -Xmx1900m -Xss128k -XX:MaxPermSize=256m -Djava.security.egd=file:/dev/./urandom -Djava.net.preferIPv4Stack=true "-Djava.library.path=${hadoop.library.path};${java.library.path}"
java.net.preferIPv4Stack