diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 27a833f..c4148a1 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -905,7 +905,7 @@ possible configurations would overwhelm and obscure the important.
     A comma-separated list of sizes for buckets for the bucketcache.
     Can be multiple sizes. List block sizes in order from smallest to largest.
     The sizes you use will depend on your data access patterns.
-    Must be a multiple of 1024 else you will run into
+    Must be a multiple of 256 else you will run into
     'java.io.IOException: Invalid HFile block magic' when you go to read from cache.
     If you specify no values here, then you pick up the default bucketsizes set
     in code (See BucketAllocator#DEFAULT_BUCKET_SIZES).
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index d514003..1b53864 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -651,7 +651,16 @@ public class CacheConfig {
     if (configuredBucketSizes != null) {
       bucketSizes = new int[configuredBucketSizes.length];
       for (int i = 0; i < configuredBucketSizes.length; i++) {
-        bucketSizes[i] = Integer.parseInt(configuredBucketSizes[i].trim());
+        int bucketSize = Integer.parseInt(configuredBucketSizes[i].trim());
+        if (bucketSize % 256 != 0) {
+          // All bucket sizes must be multiples of 256; only then will every possible block
+          // offset within a bucket also be a multiple of 256. See BucketEntry, where the
+          // offset to each block is represented using 5 bytes rather than an 8-byte long,
+          // to keep the heap overhead as low as possible.
+          throw new IllegalArgumentException("Illegal value: " + bucketSize + " configured for '"
+              + BUCKET_CACHE_BUCKETS_KEY + "'. All bucket sizes must be multiples of 256");
+        }
+        bucketSizes[i] = bucketSize;
       }
     }
     BucketCache bucketCache = null;
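
For context on the 5-byte offset encoding the comment refers to: because every bucket size (and hence every block offset) is a multiple of 256 = 2^8, the low byte of an offset is always zero and can be dropped, leaving 40 significant bits that fit in an int plus one extra byte. Below is a minimal standalone sketch of that packing scheme; the PackedOffset class and its field names are illustrative only, not the actual BucketEntry code.

public class PackedOffset {
  private int offsetBase; // low 32 bits of (offset >> 8)
  private byte offset1;   // high 8 bits of (offset >> 8)

  void setOffset(long value) {
    assert (value & 0xFF) == 0; // guaranteed when bucket sizes are multiples of 256
    value >>= 8;                // drop the always-zero low byte
    offsetBase = (int) value;
    offset1 = (byte) (value >> 32);
  }

  long offset() {
    long o = ((long) offsetBase) & 0xFFFFFFFFL; // treat the int as unsigned
    o += (((long) offset1) & 0xFF) << 32;
    return o << 8;                              // restore the dropped byte
  }

  public static void main(String[] args) {
    PackedOffset p = new PackedOffset();
    long original = 5L * 1024 * 1024 * 1024;    // a >4GB offset, multiple of 256
    p.setOffset(original);
    System.out.println(p.offset() == original); // prints true
  }
}

Forty shifted bits address offsets up to 2^48 bytes, far beyond any realistic bucket cache, so dropping 3 bytes per cached block's offset costs nothing in practice.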