From 15d2b0dbd9cdc2009f4ee50f48386e2d8d654ee8 Mon Sep 17 00:00:00 2001
From: mbautin
Date: Thu, 9 Feb 2012 20:10:32 -0800
Subject: [PATCH] TestForceCacheImportantBlocks

---
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |    7 +-
 .../io/hfile/TestForceCacheImportantBlocks.java    |  175 ++++++++++++++++++++
 2 files changed, 181 insertions(+), 1 deletions(-)
 create mode 100644 src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java

diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 177a196..ec9d581 100644
--- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1941,9 +1941,14 @@ public class HBaseTestingUtility {
         HColumnDescriptor.DEFAULT_BLOCKCACHE,
         HColumnDescriptor.DEFAULT_TTL,
         bloomType.toString());
-    hcd.setBlocksize(HFile.DEFAULT_BLOCKSIZE);
+    hcd.setBlocksize(blockSize);
     hcd.setDataBlockEncoding(encoding);
     hcd.setEncodeOnDisk(encodeOnDisk);
+    return createTestRegion(tableName, hcd);
+  }
+
+  public HRegion createTestRegion(String tableName, HColumnDescriptor hcd)
+      throws IOException {
     HTableDescriptor htd = new HTableDescriptor(tableName);
     htd.addFamily(hcd);
     HRegionInfo info =
diff --git src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
new file mode 100644
index 0000000..5e70e77
--- /dev/null
+++ src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
+import org.apache.hadoop.hbase.io.hfile.Compression.Algorithm;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.StoreFile.BloomType;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.BlockMetricType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+/**
+ * Make sure we always cache important block types, such as index blocks, as
+ * long as we have a block cache, even though block caching might be disabled
+ * for the column family.
+ */
+@Category(MediumTests.class)
+@RunWith(Parameterized.class)
+public class TestForceCacheImportantBlocks {
+
+  private final HBaseTestingUtility TEST_UTIL =
+      new HBaseTestingUtility();
+
+  private static final String TABLE = "myTable";
+  private static final String CF = "myCF";
+  private static final byte[] CF_BYTES = Bytes.toBytes(CF);
+  private static final int MAX_VERSIONS = 3;
+  private static final int NUM_HFILES = 5;
+
+  private static final int ROWS_PER_HFILE = 100;
+  private static final int NUM_ROWS = NUM_HFILES * ROWS_PER_HFILE;
+  private static final int NUM_COLS_PER_ROW = 50;
+  private static final int NUM_TIMESTAMPS_PER_COL = 50;
+
+  /** Extremely small block size, so that we can get some index blocks */
+  private static final int BLOCK_SIZE = 256;
+
+  private static final Algorithm COMPRESSION_ALGORITHM =
+      Compression.Algorithm.GZ;
+  private static final BloomType BLOOM_TYPE = BloomType.ROW;
+
+  private final int hfileVersion;
+  private final boolean cfCacheEnabled;
+
+  @Parameters
+  public static Collection<Object[]> parameters() {
+    // HFile versions
+    return Arrays.asList(new Object[][] {
+        new Object[] { new Integer(1), false },
+        new Object[] { new Integer(1), true },
+        new Object[] { new Integer(2), false },
+        new Object[] { new Integer(2), true }
+    });
+  }
+
+  public TestForceCacheImportantBlocks(int hfileVersion,
+      boolean cfCacheEnabled) {
+    this.hfileVersion = hfileVersion;
+    this.cfCacheEnabled = cfCacheEnabled;
+    TEST_UTIL.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY,
+        hfileVersion);
+  }
+
+  @Test
+  public void testCacheBlocks() throws IOException {
+    // Set index block size to be the same as normal block size.
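+    // Keeping the index chunk size as small as the data block size forces
+    // the single-level block index to overflow, so HFile v2 writes a
+    // multi-level index and INDEX blocks actually pass through the cache.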
+    TEST_UTIL.getConfiguration().setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY,
+        BLOCK_SIZE);
+
+    SchemaMetrics.setUseTableNameInTest(false);
+    HColumnDescriptor hcd =
+        new HColumnDescriptor(Bytes.toBytes(CF), MAX_VERSIONS,
+            COMPRESSION_ALGORITHM.getName(),
+            HColumnDescriptor.DEFAULT_IN_MEMORY,
+            HColumnDescriptor.DEFAULT_BLOCKCACHE,
+            HColumnDescriptor.DEFAULT_TTL,
+            BLOOM_TYPE.toString());
+    hcd.setBlocksize(BLOCK_SIZE);
+    hcd.setBlockCacheEnabled(cfCacheEnabled);
+    HRegion region = TEST_UTIL.createTestRegion(TABLE, hcd);
+    writeTestData(region);
+    Map<String, Long> metricsBefore = SchemaMetrics.getMetricsSnapshot();
+    for (int i = 0; i < NUM_ROWS; ++i) {
+      Get get = new Get(Bytes.toBytes("row" + i));
+      region.get(get, null);
+    }
+    SchemaMetrics.validateMetricChanges(metricsBefore);
+    Map<String, Long> metricsAfter = SchemaMetrics.getMetricsSnapshot();
+    Map<String, Long> metricsDelta = SchemaMetrics.diffMetrics(metricsBefore,
+        metricsAfter);
+    SchemaMetrics metrics = SchemaMetrics.getInstance(TABLE, CF);
+    List<BlockCategory> importantBlockCategories =
+        new ArrayList<BlockCategory>();
+    importantBlockCategories.add(BlockCategory.BLOOM);
+    if (hfileVersion == 2) {
+      // We only have index blocks for HFile v2.
+      importantBlockCategories.add(BlockCategory.INDEX);
+    }
+
+    for (BlockCategory category : importantBlockCategories) {
+      String hitsMetricName = getMetricName(metrics, category);
+      assertTrue("Metric " + hitsMetricName + " was not incremented",
+          metricsDelta.containsKey(hitsMetricName));
+      long hits = metricsDelta.get(hitsMetricName);
+      assertTrue("Invalid value of " + hitsMetricName + ": " + hits, hits > 0);
+    }
+
+    if (!cfCacheEnabled) {
+      // Caching is turned off for the CF, so make sure we are not caching data
+      // blocks.
+      String dataHitMetricName = getMetricName(metrics, BlockCategory.DATA);
+      assertFalse("Nonzero value for metric " + dataHitMetricName,
+          metricsDelta.containsKey(dataHitMetricName));
+    }
+  }
+
+  private String getMetricName(SchemaMetrics metrics, BlockCategory category) {
+    String hitsMetricName =
+        metrics.getBlockMetricName(category, SchemaMetrics.NO_COMPACTION,
+            BlockMetricType.CACHE_HIT);
+    return hitsMetricName;
+  }
+
+  private void writeTestData(HRegion region) throws IOException {
+    for (int i = 0; i < NUM_ROWS; ++i) {
+      Put put = new Put(Bytes.toBytes("row" + i));
+      for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
+        for (long ts = 1; ts <= NUM_TIMESTAMPS_PER_COL; ++ts) {
+          put.add(CF_BYTES, Bytes.toBytes("col" + j), ts,
+              Bytes.toBytes("value" + i + "_" + j + "_" + ts));
+        }
+      }
+      region.put(put);
+      if ((i + 1) % ROWS_PER_HFILE == 0) {
+        region.flushcache();
+      }
+    }
+  }
+
+}
\ No newline at end of file
-- 
1.7.4.4
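
For context, the invariant this test pins down can be summarized by the
cache-on-read decision below. This is a minimal illustrative sketch, not the
actual reader/cache-configuration code touched by the patch: the class name
ForceCachePolicySketch, the method shouldCacheBlockOnRead, and both boolean
fields are made up for illustration; only BlockCategory comes from HBase.

import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;

/** Illustrative policy: which freshly read blocks go into the block cache. */
public class ForceCachePolicySketch {
  private final boolean blockCachePresent; // is any block cache configured?
  private final boolean cacheDataOnRead;   // per-CF "block cache enabled" flag

  public ForceCachePolicySketch(boolean blockCachePresent,
      boolean cacheDataOnRead) {
    this.blockCachePresent = blockCachePresent;
    this.cacheDataOnRead = cacheDataOnRead;
  }

  /** Returns true if a block of this category should be cached on read. */
  public boolean shouldCacheBlockOnRead(BlockCategory category) {
    if (!blockCachePresent) {
      return false; // nowhere to put the block
    }
    if (category == BlockCategory.INDEX || category == BlockCategory.BLOOM) {
      return true; // "important" blocks are forced into the cache
    }
    return cacheDataOnRead; // data blocks honor the column family setting
  }
}

Index and Bloom blocks are small relative to the seeks they save, so they are
cached whenever any block cache exists, while data blocks respect the
per-family flag; that split is exactly what the INDEX/BLOOM assertions and the
!cfCacheEnabled DATA assertion in testCacheBlocks verify.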