diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java index 597de52..e774d65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java @@ -18,18 +18,16 @@ */ package org.apache.hadoop.hbase.io.hfile; -import java.io.IOException; -import java.util.List; +import java.util.Iterator; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; /** * Block cache interface. Anything that implements the {@link Cacheable} * interface can be put in the cache. */ @InterfaceAudience.Private -public interface BlockCache { +public interface BlockCache extends Iterable { /** * Add block to cache. * @param cacheKey The block's cache key. @@ -113,16 +111,12 @@ public interface BlockCache { long getBlockCount(); /** - * Performs a BlockCache summary and returns a List of BlockCacheColumnFamilySummary objects. - * This method could be fairly heavyweight in that it evaluates the entire HBase file-system - * against what is in the RegionServer BlockCache. - *
- * The contract of this interface is to return the List in sorted order by Table name, then - * ColumnFamily. - * - * @param conf HBaseConfiguration - * @return List of BlockCacheColumnFamilySummary - * @throws IOException exception + * @return Iterator over the blocks in the cache. + */ + Iterator iterator(); + + /** + * @return The list of sub blockcaches that make up this one; returns null if no sub caches. */ - List getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException; + BlockCache [] getBlockCaches(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java deleted file mode 100644 index d5b7a7a..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java +++ /dev/null @@ -1,247 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.io.hfile; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.Writable; - -/** - * BlockCacheColumnFamilySummary represents a summary of the blockCache usage - * at Table/ColumnFamily granularity. - *
- * As ColumnFamilies are owned by Tables, a summary by ColumnFamily implies that - * the owning Table is included in the summarization. - * - */ -@InterfaceAudience.Private -public class BlockCacheColumnFamilySummary implements Writable, Comparable { - - private String table = ""; - private String columnFamily = ""; - private int blocks; - private long heapSize; - - /** - * Default constructor for Writable - */ - public BlockCacheColumnFamilySummary() { - - } - - /** - * - * @param table table - * @param columnFamily columnFamily - */ - public BlockCacheColumnFamilySummary(String table, String columnFamily) { - this.table = table; - this.columnFamily = columnFamily; - } - - /** - * - * @return table - */ - public String getTable() { - return table; - } - /** - * - * @param table (table that owns the cached block) - */ - public void setTable(String table) { - this.table = table; - } - /** - * - * @return columnFamily - */ - public String getColumnFamily() { - return columnFamily; - } - /** - * - * @param columnFamily (columnFamily that owns the cached block) - */ - public void setColumnFamily(String columnFamily) { - this.columnFamily = columnFamily; - } - - /** - * - * @return blocks in the cache - */ - public int getBlocks() { - return blocks; - } - /** - * - * @param blocks in the cache - */ - public void setBlocks(int blocks) { - this.blocks = blocks; - } - - /** - * - * @return heapSize in the cache - */ - public long getHeapSize() { - return heapSize; - } - - /** - * Increments the number of blocks in the cache for this entry - */ - public void incrementBlocks() { - this.blocks++; - } - - /** - * - * @param heapSize to increment - */ - public void incrementHeapSize(long heapSize) { - this.heapSize = this.heapSize + heapSize; - } - - /** - * - * @param heapSize (total heapSize for the table/CF) - */ - public void setHeapSize(long heapSize) { - this.heapSize = heapSize; - } - - @Override - public void readFields(DataInput in) throws IOException { - table = in.readUTF(); - columnFamily = in.readUTF(); - blocks = in.readInt(); - heapSize = in.readLong(); - } - - @Override - public void write(DataOutput out) throws IOException { - out.writeUTF(table); - out.writeUTF(columnFamily); - out.writeInt(blocks); - out.writeLong(heapSize); - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result - + ((columnFamily == null) ? 0 : columnFamily.hashCode()); - result = prime * result + ((table == null) ? 0 : table.hashCode()); - return result; - } - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - BlockCacheColumnFamilySummary other = (BlockCacheColumnFamilySummary) obj; - if (columnFamily == null) { - if (other.columnFamily != null) - return false; - } else if (!columnFamily.equals(other.columnFamily)) - return false; - if (table == null) { - if (other.table != null) - return false; - } else if (!table.equals(other.table)) - return false; - return true; - } - - - - @Override - public String toString() { - return "BlockCacheSummaryEntry [table=" + table + ", columnFamily=" - + columnFamily + ", blocks=" + blocks + ", heapSize=" + heapSize + "]"; - } - - /** - * Construct a BlockCacheSummaryEntry from a full StoreFile Path - *
- * The path is expected to be in the format of... - *
-   * hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744
-   *
-   * ... where:
-   * '-ROOT-' = Table
-   * '70236052' = Region
-   * 'info' = ColumnFamily
- * '3944417774205889744' = StoreFile - * - * @param path (full StoreFile Path) - * @return BlockCacheSummaryEntry - */ - public static BlockCacheColumnFamilySummary createFromStoreFilePath(Path path) { - - // The full path will look something like this... - // hdfs://localhost:51169/user/doug.meil/-ROOT-/70236052/info/3944417774205889744 - // tbl region cf sf - String sp = path.toString(); - String s[] = sp.split("\\/"); - - BlockCacheColumnFamilySummary bcse = null; - if (s.length >= 4) { - // why 4? StoreFile, CF, Region, Table - String table = s[s.length - 4]; // 4th from the end - String cf = s[s.length - 2]; // 2nd from the end - bcse = new BlockCacheColumnFamilySummary(table, cf); - } - return bcse; - } - - @Override - public int compareTo(BlockCacheColumnFamilySummary o) { - int i = table.compareTo(o.getTable()); - if (i != 0) { - return i; - } - return columnFamily.compareTo(o.getColumnFamily()); - } - - /** - * Creates a new BlockCacheSummaryEntry - * - * @param e BlockCacheSummaryEntry - * @return new BlockCacheSummaryEntry - */ - public static BlockCacheColumnFamilySummary create(BlockCacheColumnFamilySummary e) { - BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary(); - e2.setTable(e.getTable()); - e2.setColumnFamily(e.getColumnFamily()); - return e2; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java index 1a89c01..4cba54c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java @@ -88,4 +88,4 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { public long getOffset() { return offset; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java new file mode 100644 index 0000000..9af3277 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCachesIterator.java @@ -0,0 +1,58 @@ +/** + * Copyright The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +import java.util.Iterator; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Iterator over an array of BlockCache CachedBlocks. 
+ */ +@InterfaceAudience.Private +class BlockCachesIterator implements Iterator { + int index = 0; + final BlockCache [] bcs; + Iterator current; + + BlockCachesIterator(final BlockCache [] blockCaches) { + this.bcs = blockCaches; + this.current = this.bcs[this.index].iterator(); + } + + @Override + public boolean hasNext() { + if (current.hasNext()) return true; + this.index++; + if (this.index >= this.bcs.length) return false; + this.current = this.bcs[this.index].iterator(); + return hasNext(); + } + + @Override + public CachedBlock next() { + return this.current.next(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java new file mode 100644 index 0000000..169b3ff --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +public enum BlockPriority { + /** + * Accessed a single time (used for scan-resistance) + */ + SINGLE, + /** + * Accessed multiple times + */ + MULTI, + /** + * Block from in-memory store + */ + MEMORY +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 741c9e2..9f81145 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -451,6 +451,8 @@ public class CacheConfig { long lruCacheSize = (long) (mu.getMax() * cachePercentage); int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE); long slabCacheOffHeapCacheSize = + conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0) == 0? + 0: (long) (conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, (float) 0) * DirectMemoryUtils.getDirectMemorySize()); if (slabCacheOffHeapCacheSize <= 0) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index 9f51b9e..4674eff 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceAudience; */ @InterfaceAudience.Private public class CacheStats { - /** Sliding window statistics. 
The number of metric periods to include in * sliding window hit ratio calculations. */ @@ -35,6 +34,7 @@ public class CacheStats { /** The number of getBlock requests that were cache hits */ private final AtomicLong hitCount = new AtomicLong(0); + /** * The number of getBlock requests that were cache hits, but only from * requests that were set to use the block cache. This is because all reads @@ -42,15 +42,19 @@ public class CacheStats { * into the block cache. See HBASE-2253 for more information. */ private final AtomicLong hitCachingCount = new AtomicLong(0); + /** The number of getBlock requests that were cache misses */ private final AtomicLong missCount = new AtomicLong(0); + /** * The number of getBlock requests that were cache misses, but only from * requests that were set to use the block cache. */ private final AtomicLong missCachingCount = new AtomicLong(0); + /** The number of times an eviction has occurred */ private final AtomicLong evictionCount = new AtomicLong(0); + /** The total number of blocks that have been evicted */ private final AtomicLong evictedBlockCount = new AtomicLong(0); @@ -89,8 +93,10 @@ public class CacheStats { @Override public String toString() { - return "hitCount=" + this.hitCount + ", hitCachingCount=" + this.hitCachingCount + - ", missCount=" + this.missCount; + return "hitCount=" + getHitCount() + ", hitCachingCount=" + getHitCachingCount() + + ", missCount=" + getMissCount() + ", missCachingCount=" + getMissCachingCount() + + ", evictionCount=" + getEvictionCount() + + ", evictedBlockCount=" + getEvictedCount(); } public void miss(boolean caching) { @@ -217,4 +223,4 @@ public class CacheStats { } return zeros; } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java index 24350ee..541a2a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java @@ -1,5 +1,4 @@ /** - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,110 +18,13 @@ package org.apache.hadoop.hbase.io.hfile; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.io.HeapSize; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.ClassSize; -/** - * Represents an entry in the {@link LruBlockCache}. - * - *
Makes the block memory-aware with {@link HeapSize} and Comparable - * to sort by access time for the LRU. It also takes care of priority by - * either instantiating as in-memory or handling the transition from single - * to multiple access. - */ @InterfaceAudience.Private -public class CachedBlock implements HeapSize, Comparable { - - public final static long PER_BLOCK_OVERHEAD = ClassSize.align( - ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) + - ClassSize.STRING + ClassSize.BYTE_BUFFER); - - static enum BlockPriority { - /** - * Accessed a single time (used for scan-resistance) - */ - SINGLE, - /** - * Accessed multiple times - */ - MULTI, - /** - * Block from in-memory store - */ - MEMORY - }; - - private final BlockCacheKey cacheKey; - private final Cacheable buf; - private volatile long accessTime; - private long size; - private BlockPriority priority; - - public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime) { - this(cacheKey, buf, accessTime, false); - } - - public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, - boolean inMemory) { - this.cacheKey = cacheKey; - this.buf = buf; - this.accessTime = accessTime; - // We approximate the size of this class by the size of its name string - // plus the size of its byte buffer plus the overhead associated with all - // the base classes. We also include the base class - // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with - // their buffer lengths. This variable is used elsewhere in unit tests. - this.size = ClassSize.align(cacheKey.heapSize()) - + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD; - if(inMemory) { - this.priority = BlockPriority.MEMORY; - } else { - this.priority = BlockPriority.SINGLE; - } - } - - /** - * Block has been accessed. Update its local access time. - */ - public void access(long accessTime) { - this.accessTime = accessTime; - if(this.priority == BlockPriority.SINGLE) { - this.priority = BlockPriority.MULTI; - } - } - - public long heapSize() { - return size; - } - - @Override - public int compareTo(CachedBlock that) { - if(this.accessTime == that.accessTime) return 0; - return this.accessTime < that.accessTime ? 1 : -1; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj == null || getClass() != obj.getClass()) { - return false; - } - CachedBlock other = (CachedBlock) obj; - return compareTo(other) == 0; - } - - public Cacheable getBuffer() { - return this.buf; - } - - public BlockCacheKey getCacheKey() { - return this.cacheKey; - } - - public BlockPriority getPriority() { - return this.priority; - } -} +public interface CachedBlock extends Comparable { + BlockPriority getBlockPriority(); + BlockType getBlockType(); + long getOffset(); + long getSize(); + long getCachedTime(); + String getFilename(); +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java deleted file mode 100644 index d57b1f1..0000000 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java +++ /dev/null @@ -1,109 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hbase.io.hfile; - -import com.google.common.collect.MinMaxPriorityQueue; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.hbase.io.HeapSize; - -/** - * A memory-bound queue that will grow until an element brings - * total size >= maxSize. From then on, only entries that are sorted larger - * than the smallest current entry will be inserted/replaced. - * - *
Use this when you want to find the largest elements (according to their - * ordering, not their heap size) that consume as close to the specified - * maxSize as possible. Default behavior is to grow just above rather than - * just below specified max. - * - *
Object used in this queue must implement {@link HeapSize} as well as - * {@link Comparable}. - */ -@InterfaceAudience.Private -public class CachedBlockQueue implements HeapSize { - - private MinMaxPriorityQueue queue; - - private long heapSize; - private long maxSize; - - /** - * @param maxSize the target size of elements in the queue - * @param blockSize expected average size of blocks - */ - public CachedBlockQueue(long maxSize, long blockSize) { - int initialSize = (int)(maxSize / blockSize); - if(initialSize == 0) initialSize++; - queue = MinMaxPriorityQueue.expectedSize(initialSize).create(); - heapSize = 0; - this.maxSize = maxSize; - } - - /** - * Attempt to add the specified cached block to this queue. - * - *
If the queue is smaller than the max size, or if the specified element - * is ordered before the smallest element in the queue, the element will be - * added to the queue. Otherwise, there is no side effect of this call. - * @param cb block to try to add to the queue - */ - public void add(CachedBlock cb) { - if(heapSize < maxSize) { - queue.add(cb); - heapSize += cb.heapSize(); - } else { - CachedBlock head = queue.peek(); - if(cb.compareTo(head) > 0) { - heapSize += cb.heapSize(); - heapSize -= head.heapSize(); - if(heapSize > maxSize) { - queue.poll(); - } else { - heapSize += head.heapSize(); - } - queue.add(cb); - } - } - } - - /** - * @return The next element in this queue, or {@code null} if the queue is - * empty. - */ - public CachedBlock poll() { - return queue.poll(); - } - - /** - * @return The last element in this queue, or {@code null} if the queue is - * empty. - */ - public CachedBlock pollLast() { - return queue.pollLast(); - } - - /** - * Total size of all elements in this queue. - * @return size of all elements currently in queue, in bytes - */ - public long heapSize() { - return heapSize; - } -} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java index eb03fbd..0a23717 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java @@ -18,11 +18,9 @@ */ package org.apache.hadoop.hbase.io.hfile; -import java.io.IOException; -import java.util.List; +import java.util.Iterator; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; @@ -128,12 +126,6 @@ public class CombinedBlockCache implements BlockCache, HeapSize { return lruCache.getBlockCount() + bucketCache.getBlockCount(); } - @Override - public List getBlockCacheColumnFamilySummaries( - Configuration conf) throws IOException { - throw new UnsupportedOperationException(); - } - private static class CombinedCacheStats extends CacheStats { private final CacheStats lruCacheStats; private final CacheStats bucketCacheStats; @@ -208,4 +200,14 @@ public class CombinedBlockCache implements BlockCache, HeapSize { return Double.isNaN(ratio) ? 
0 : ratio; } } -} + + @Override + public Iterator iterator() { + return new BlockCachesIterator(getBlockCaches()); + } + + @Override + public BlockCache[] getBlockCaches() { + return new BlockCache [] {this.lruCache, this.bucketCache}; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java index da7a56e..4676653 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java @@ -18,8 +18,7 @@ */ package org.apache.hadoop.hbase.io.hfile; -import java.io.IOException; -import java.util.List; +import java.util.Iterator; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -165,12 +164,6 @@ public class DoubleBlockCache implements ResizableBlockCache, HeapSize { } @Override - public List getBlockCacheColumnFamilySummaries( - Configuration conf) throws IOException { - return onHeapCache.getBlockCacheColumnFamilySummaries(conf); - } - - @Override public long getBlockCount() { return onHeapCache.getBlockCount() + offHeapCache.getBlockCount(); } @@ -179,4 +172,14 @@ public class DoubleBlockCache implements ResizableBlockCache, HeapSize { public void setMaxSize(long size) { this.onHeapCache.setMaxSize(size); } -} + + @Override + public Iterator iterator() { + return new BlockCachesIterator(getBlockCaches()); + } + + @Override + public BlockCache[] getBlockCaches() { + return new BlockCache [] {this.onHeapCache, this.offHeapCache}; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index dead173..eb46a20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -18,13 +18,10 @@ */ package org.apache.hadoop.hbase.io.hfile; -import java.io.IOException; import java.lang.ref.WeakReference; import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; import java.util.EnumMap; -import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.PriorityQueue; @@ -41,18 +38,15 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.CachedBlock.BlockPriority; import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; -import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.util.StringUtils; +import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; /** @@ -132,7 +126,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { static final int statThreadPeriod = 60 * 5; /** Concurrent map (the cache) */ - private final Map map; + private final Map map; /** Eviction lock (locked 
when eviction in process) */ private final ReentrantLock evictionLock = new ReentrantLock(true); @@ -272,7 +266,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { this.maxSize = maxSize; this.blockSize = blockSize; this.forceInMemory = forceInMemory; - map = new ConcurrentHashMap(mapInitialSize, + map = new ConcurrentHashMap(mapInitialSize, mapLoadFactor, mapConcurrencyLevel); this.minFactor = minFactor; this.acceptableFactor = acceptableFactor; @@ -315,7 +309,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { */ @Override public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) { - CachedBlock cb = map.get(cacheKey); + LruCachedBlock cb = map.get(cacheKey); if(cb != null) { // compare the contents, if they are not equal, we are in big trouble if (compare(buf, cb.getBuffer()) != 0) { @@ -327,7 +321,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { LOG.warn(msg); return; } - cb = new CachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory); + cb = new LruCachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory); long newSize = updateSizeMetrics(cb, false); map.put(cacheKey, cb); elements.incrementAndGet(); @@ -358,12 +352,12 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { /** * Helper function that updates the local size counter and also updates any * per-cf or per-blocktype metrics it can discern from given - * {@link CachedBlock} + * {@link LruCachedBlock} * * @param cb * @param evict */ - protected long updateSizeMetrics(CachedBlock cb, boolean evict) { + protected long updateSizeMetrics(LruCachedBlock cb, boolean evict) { long heapsize = cb.heapSize(); if (evict) { heapsize *= -1; @@ -383,7 +377,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { @Override public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat, boolean updateCacheMetrics) { - CachedBlock cb = map.get(cacheKey); + LruCachedBlock cb = map.get(cacheKey); if (cb == null) { if (!repeat && updateCacheMetrics) stats.miss(caching); if (victimHandler != null) { @@ -407,7 +401,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { @Override public boolean evictBlock(BlockCacheKey cacheKey) { - CachedBlock cb = map.get(cacheKey); + LruCachedBlock cb = map.get(cacheKey); if (cb == null) return false; evictBlock(cb, false); return true; @@ -446,7 +440,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { * EvictionThread * @return the heap size of evicted block */ - protected long evictBlock(CachedBlock block, boolean evictedByEvictionProcess) { + protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) { map.remove(block.getCacheKey()); updateSizeMetrics(block, true); elements.decrementAndGet(); @@ -501,7 +495,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { memorySize()); // Scan entire map putting into appropriate buckets - for(CachedBlock cachedBlock : map.values()) { + for(LruCachedBlock cachedBlock : map.values()) { switch(cachedBlock.getPriority()) { case SINGLE: { bucketSingle.add(cachedBlock); @@ -597,23 +591,23 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { */ private class BlockBucket implements Comparable { - private CachedBlockQueue queue; + private LruCachedBlockQueue queue; private long totalSize = 0; private long bucketSize; public BlockBucket(long bytesToFree, long blockSize, long bucketSize) { this.bucketSize = bucketSize; 
- queue = new CachedBlockQueue(bytesToFree, blockSize); + queue = new LruCachedBlockQueue(bytesToFree, blockSize); totalSize = 0; } - public void add(CachedBlock block) { + public void add(LruCachedBlock block) { totalSize += block.heapSize(); queue.add(block); } public long free(long toFree) { - CachedBlock cb; + LruCachedBlock cb; long freedBytes = 0; while ((cb = queue.pollLast()) != null) { freedBytes += evictBlock(cb, true); @@ -812,36 +806,66 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { } @Override - public List getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException { - - Map sfMap = FSUtils.getTableStoreFilePathMap( - FileSystem.get(conf), - FSUtils.getRootDir(conf)); - - // quirky, but it's a compound key and this is a shortcut taken instead of - // creating a class that would represent only a key. - Map bcs = - new HashMap(); - - for (CachedBlock cb : map.values()) { - String sf = cb.getCacheKey().getHfileName(); - Path path = sfMap.get(sf); - if ( path != null) { - BlockCacheColumnFamilySummary lookup = - BlockCacheColumnFamilySummary.createFromStoreFilePath(path); - BlockCacheColumnFamilySummary bcse = bcs.get(lookup); - if (bcse == null) { - bcse = BlockCacheColumnFamilySummary.create(lookup); - bcs.put(lookup,bcse); - } - bcse.incrementBlocks(); - bcse.incrementHeapSize(cb.heapSize()); + public Iterator iterator() { + final Iterator iterator = map.values().iterator(); + + return new Iterator() { + @Override + public boolean hasNext() { + return iterator.hasNext(); } - } - List list = - new ArrayList(bcs.values()); - Collections.sort( list ); - return list; + + @Override + public CachedBlock next() { + final LruCachedBlock b = iterator.next(); + return new CachedBlock() { + @Override + public String toString() { + return CachedBlockUtil.toString(this); + } + + @Override + public BlockPriority getBlockPriority() { + return b.getPriority(); + } + + @Override + public BlockType getBlockType() { + return b.getBuffer().getBlockType(); + } + + @Override + public long getOffset() { + return b.getCacheKey().getOffset(); + } + + @Override + public long getSize() { + return b.getBuffer().heapSize(); + } + + @Override + public long getCachedTime() { + return b.getCachedTime(); + } + + @Override + public String getFilename() { + return b.getCacheKey().getHfileName(); + } + + @Override + public int compareTo(CachedBlock other) { + return (int)(other.getOffset() - this.getOffset()); + } + }; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; } // Simple calculators of sizes given factors and maxSize @@ -902,10 +926,11 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { return fileNames; } + @VisibleForTesting Map getBlockTypeCountsForTest() { Map counts = new EnumMap(BlockType.class); - for (CachedBlock cb : map.values()) { + for (LruCachedBlock cb : map.values()) { BlockType blockType = ((HFileBlock) cb.getBuffer()).getBlockType(); Integer count = counts.get(blockType); counts.put(blockType, (count == null ? 
0 : count) + 1); @@ -916,7 +941,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { public Map getEncodingCountsForTest() { Map counts = new EnumMap(DataBlockEncoding.class); - for (CachedBlock block : map.values()) { + for (LruCachedBlock block : map.values()) { DataBlockEncoding encoding = ((HFileBlock) block.getBuffer()).getDataBlockEncoding(); Integer count = counts.get(encoding); @@ -929,4 +954,9 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { assert victimHandler == null; victimHandler = handler; } -} + + @Override + public BlockCache[] getBlockCaches() { + return null; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java new file mode 100644 index 0000000..a8186ef --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java @@ -0,0 +1,126 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.HeapSize; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.ClassSize; + +/** + * Represents an entry in the {@link LruBlockCache}. + * + *
Makes the block memory-aware with {@link HeapSize} and Comparable + * to sort by access time for the LRU. It also takes care of priority by + * either instantiating as in-memory or handling the transition from single + * to multiple access. + */ +@InterfaceAudience.Private +public class LruCachedBlock implements HeapSize, Comparable { + + public final static long PER_BLOCK_OVERHEAD = ClassSize.align( + ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (3 * Bytes.SIZEOF_LONG) + + ClassSize.STRING + ClassSize.BYTE_BUFFER); + + private final BlockCacheKey cacheKey; + private final Cacheable buf; + private volatile long accessTime; + private long size; + private BlockPriority priority; + /** + * Time this block was cached. Presumes we are created just before we are added to the cache. + */ + private final long cachedTime = System.nanoTime(); + + public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime) { + this(cacheKey, buf, accessTime, false); + } + + public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime, + boolean inMemory) { + this.cacheKey = cacheKey; + this.buf = buf; + this.accessTime = accessTime; + // We approximate the size of this class by the size of its name string + // plus the size of its byte buffer plus the overhead associated with all + // the base classes. We also include the base class + // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with + // their buffer lengths. This variable is used elsewhere in unit tests. + this.size = ClassSize.align(cacheKey.heapSize()) + + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD; + if(inMemory) { + this.priority = BlockPriority.MEMORY; + } else { + this.priority = BlockPriority.SINGLE; + } + } + + /** + * Block has been accessed. + * @param accessTime Last access; this is actually a incremented sequence number rather than an + * actual time. + */ + public void access(long accessTime) { + this.accessTime = accessTime; + if(this.priority == BlockPriority.SINGLE) { + this.priority = BlockPriority.MULTI; + } + } + + /** + * @return Time we were cached at in nano seconds. + */ + public long getCachedTime() { + return this.cachedTime; + } + + public long heapSize() { + return size; + } + + @Override + public int compareTo(LruCachedBlock that) { + if(this.accessTime == that.accessTime) return 0; + return this.accessTime < that.accessTime ? 1 : -1; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + LruCachedBlock other = (LruCachedBlock) obj; + return compareTo(other) == 0; + } + + public Cacheable getBuffer() { + return this.buf; + } + + public BlockCacheKey getCacheKey() { + return this.cacheKey; + } + + public BlockPriority getPriority() { + return this.priority; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java new file mode 100644 index 0000000..f61aba6 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java @@ -0,0 +1,109 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.io.hfile; + +import com.google.common.collect.MinMaxPriorityQueue; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.HeapSize; + +/** + * A memory-bound queue that will grow until an element brings + * total size >= maxSize. From then on, only entries that are sorted larger + * than the smallest current entry will be inserted/replaced. + * + *
Use this when you want to find the largest elements (according to their + * ordering, not their heap size) that consume as close to the specified + * maxSize as possible. Default behavior is to grow just above rather than + * just below specified max. + * + *
Object used in this queue must implement {@link HeapSize} as well as + * {@link Comparable}. + */ +@InterfaceAudience.Private +public class LruCachedBlockQueue implements HeapSize { + + private MinMaxPriorityQueue queue; + + private long heapSize; + private long maxSize; + + /** + * @param maxSize the target size of elements in the queue + * @param blockSize expected average size of blocks + */ + public LruCachedBlockQueue(long maxSize, long blockSize) { + int initialSize = (int)(maxSize / blockSize); + if(initialSize == 0) initialSize++; + queue = MinMaxPriorityQueue.expectedSize(initialSize).create(); + heapSize = 0; + this.maxSize = maxSize; + } + + /** + * Attempt to add the specified cached block to this queue. + * + *
If the queue is smaller than the max size, or if the specified element + * is ordered before the smallest element in the queue, the element will be + * added to the queue. Otherwise, there is no side effect of this call. + * @param cb block to try to add to the queue + */ + public void add(LruCachedBlock cb) { + if(heapSize < maxSize) { + queue.add(cb); + heapSize += cb.heapSize(); + } else { + LruCachedBlock head = queue.peek(); + if(cb.compareTo(head) > 0) { + heapSize += cb.heapSize(); + heapSize -= head.heapSize(); + if(heapSize > maxSize) { + queue.poll(); + } else { + heapSize += head.heapSize(); + } + queue.add(cb); + } + } + } + + /** + * @return The next element in this queue, or {@code null} if the queue is + * empty. + */ + public LruCachedBlock poll() { + return queue.poll(); + } + + /** + * @return The last element in this queue, or {@code null} if the queue is + * empty. + */ + public LruCachedBlock pollLast() { + return queue.pollLast(); + } + + /** + * Total size of all elements in this queue. + * @return size of all elements currently in queue, in bytes + */ + public long heapSize() { + return heapSize; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java index 090bfa3..528f034 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java @@ -361,14 +361,13 @@ public final class BucketAllocator { } } - public String getInfo() { + public String toString() { StringBuilder sb = new StringBuilder(1024); for (int i = 0; i < buckets.length; ++i) { Bucket b = buckets[i]; - sb.append(" Bucket ").append(i).append(": ").append(b.itemAllocationSize()); - sb.append(" freeCount=").append(b.freeCount()).append(" used=") - .append(b.usedCount()); - sb.append('\n'); + if (i > 0) sb.append(", "); + sb.append("bucket.").append(i).append(": size=").append(b.itemAllocationSize()); + sb.append(", freeCount=").append(b.freeCount()).append(", used=").append(b.usedCount()); } return sb.toString(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 36cd00b..71719c8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -31,6 +31,7 @@ import java.io.Serializable; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Comparator; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.PriorityQueue; @@ -48,15 +49,17 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; +import org.apache.hadoop.hbase.io.hfile.BlockPriority; +import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.io.hfile.Cacheable; import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager; +import org.apache.hadoop.hbase.io.hfile.CachedBlock; +import org.apache.hadoop.hbase.io.hfile.CachedBlockUtil; import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache; import org.apache.hadoop.hbase.io.hfile.HFileBlock; import org.apache.hadoop.hbase.util.ConcurrentIndex; @@ -253,7 +256,7 @@ public class BucketCache implements BlockCache, HeapSize { ", capacity=" + StringUtils.byteDesc(capacity) + ", blockSize=" + StringUtils.byteDesc(blockSize) + ", writerThreadNum=" + writerThreadNum + ", writerQLen=" + writerQLen + ", persistencePath=" + - persistencePath); + persistencePath + ", bucketAllocator=" + this.bucketAllocator); } /** @@ -374,8 +377,9 @@ public class BucketCache implements BlockCache, HeapSize { if (lenRead != len) { throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected"); } - Cacheable cachedBlock = bucketEntry.deserializerReference( - deserialiserMap).deserialize(bb, true); + CacheableDeserializer deserializer = + bucketEntry.deserializerReference(this.deserialiserMap); + Cacheable cachedBlock = deserializer.deserialize(bb, true); long timeTaken = System.nanoTime() - start; if (updateCacheMetrics) { cacheStats.hit(caching); @@ -955,28 +959,6 @@ public class BucketCache implements BlockCache, HeapSize { return numEvicted; } - - @Override - public List getBlockCacheColumnFamilySummaries( - Configuration conf) { - throw new UnsupportedOperationException(); - } - - static enum BlockPriority { - /** - * Accessed a single time (used for scan-resistance) - */ - SINGLE, - /** - * Accessed multiple times - */ - MULTI, - /** - * Block from in-memory store - */ - MEMORY - }; - /** * Item in cache. We expect this to be where most memory goes. Java uses 8 * bytes just for object headers; after this, we want to use as little as @@ -993,6 +975,10 @@ public class BucketCache implements BlockCache, HeapSize { byte deserialiserIndex; private volatile long accessTime; private BlockPriority priority; + /** + * Time this block was cached. Presumes we are created just before we are added to the cache. + */ + private final long cachedTime = System.nanoTime(); BucketEntry(long offset, int length, long accessTime, boolean inMemory) { setOffset(offset); @@ -1059,6 +1045,10 @@ public class BucketCache implements BlockCache, HeapSize { public boolean equals(Object that) { return this == that; } + + public long getCachedTime() { + return cachedTime; + } } /** @@ -1196,4 +1186,74 @@ public class BucketCache implements BlockCache, HeapSize { writerThread.join(); } } -} + + @Override + public Iterator iterator() { + // Don't bother with ramcache since stuff is in here only a little while. + final Iterator> i = + this.backingMap.entrySet().iterator(); + return new Iterator() { + @Override + public boolean hasNext() { + return i.hasNext(); + } + + @Override + public CachedBlock next() { + final Map.Entry e = i.next(); + return new CachedBlock() { + @Override + public String toString() { + return CachedBlockUtil.toString(this); + } + + @Override + public BlockPriority getBlockPriority() { + return e.getValue().getPriority(); + } + + @Override + public BlockType getBlockType() { + // Not held by BucketEntry. Could add it if wanted on BucketEntry creation. 
+ return null; + } + + @Override + public long getOffset() { + return e.getKey().getOffset(); + } + + @Override + public long getSize() { + return e.getValue().getLength(); + } + + @Override + public long getCachedTime() { + return e.getValue().getCachedTime(); + } + + @Override + public String getFilename() { + return e.getKey().getHfileName(); + } + + @Override + public int compareTo(CachedBlock other) { + return (int)(this.getOffset() - other.getOffset()); + } + }; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public BlockCache[] getBlockCaches() { + return null; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java index 381c5c9..d9494e4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java @@ -19,21 +19,20 @@ package org.apache.hadoop.hbase.io.hfile.slab; import java.nio.ByteBuffer; -import java.util.List; +import java.util.Iterator; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.io.hfile.Cacheable; import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer; +import org.apache.hadoop.hbase.io.hfile.CachedBlock; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.util.StringUtils; @@ -317,16 +316,6 @@ public class SingleSizeCache implements BlockCache, HeapSize { return 0; } - /* - * Not implemented. 
Extremely costly to do this from the off heap cache, you'd - * need to copy every object on heap once - */ - @Override - public List getBlockCacheColumnFamilySummaries( - Configuration conf) { - throw new UnsupportedOperationException(); - } - /* Just a pair class, holds a reference to the parent cacheable */ private static class CacheablePair implements HeapSize { final CacheableDeserializer deserializer; @@ -351,4 +340,14 @@ public class SingleSizeCache implements BlockCache, HeapSize { + ClassSize.ATOMIC_LONG); } } -} + + @Override + public Iterator iterator() { + return null; + } + + @Override + public BlockCache[] getBlockCaches() { + return null; + } +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java index 561c6f4..2d90c44 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java @@ -20,7 +20,8 @@ package org.apache.hadoop.hbase.io.hfile.slab; import java.math.BigDecimal; -import java.util.List; +import java.util.Iterator; +import java.util.Map; import java.util.Map.Entry; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; @@ -35,10 +36,13 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; +import org.apache.hadoop.hbase.io.hfile.BlockPriority; +import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.hbase.io.hfile.Cacheable; +import org.apache.hadoop.hbase.io.hfile.CachedBlock; +import org.apache.hadoop.hbase.io.hfile.CachedBlockUtil; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.HasThread; import org.apache.hadoop.util.StringUtils; @@ -430,14 +434,74 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize { return numEvicted; } - /* - * Not implemented. Extremely costly to do this from the off heap cache, you'd - * need to copy every object on heap once - */ @Override - public List getBlockCacheColumnFamilySummaries( - Configuration conf) { - throw new UnsupportedOperationException(); + public Iterator iterator() { + // Don't bother with ramcache since stuff is in here only a little while. 
+ final Iterator> i = + this.backingStore.entrySet().iterator(); + return new Iterator() { + + @Override + public boolean hasNext() { + return i.hasNext(); + } + + @Override + public CachedBlock next() { + final Map.Entry e = i.next(); + final Cacheable cacheable = e.getValue().getBlock(e.getKey(), false, false, false); + return new CachedBlock() { + @Override + public String toString() { + return CachedBlockUtil.toString(this); + } + + @Override + public BlockPriority getBlockPriority() { + return null; + } + + @Override + public BlockType getBlockType() { + return cacheable.getBlockType(); + } + + @Override + public long getOffset() { + return e.getKey().getOffset(); + } + + @Override + public long getSize() { + return cacheable.getSerializedLength(); + } + + @Override + public long getCachedTime() { + return -1; + } + + @Override + public String getFilename() { + return e.getKey().getHfileName(); + } + + @Override + public int compareTo(CachedBlock other) { + return (int)(this.getOffset() - other.getOffset()); + } + }; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + }; } -} + @Override + public BlockCache[] getBlockCaches() { + return null; + } +} \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 7cbd4a4..7f4ca52 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; -import org.apache.hadoop.hbase.io.hfile.CachedBlock; +import org.apache.hadoop.hbase.io.hfile.LruCachedBlock; import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.regionserver.DefaultMemStore; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -279,8 +279,8 @@ public class TestHeapSize { // CachedBlock Fixed Overhead // We really need "deep" sizing but ClassSize does not do this. // Perhaps we should do all these more in this style.... - cl = CachedBlock.class; - actual = CachedBlock.PER_BLOCK_OVERHEAD; + cl = LruCachedBlock.class; + actual = LruCachedBlock.PER_BLOCK_OVERHEAD; expected = ClassSize.estimateBase(cl, false); expected += ClassSize.estimateBase(String.class, false); expected += ClassSize.estimateBase(ByteBuffer.class, false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CachedBlockUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CachedBlockUtil.java new file mode 100644 index 0000000..858b207 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/CachedBlockUtil.java @@ -0,0 +1,130 @@ +package org.apache.hadoop.hbase.io.hfile; + +import com.yammer.metrics.core.Histogram; +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.util.NavigableMap;
+import java.util.NavigableSet;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+
+import com.yammer.metrics.core.Histogram;
+import com.yammer.metrics.core.MetricsRegistry;
+import com.yammer.metrics.stats.Snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+public class CachedBlockUtil {
+  private static final MetricsRegistry METRICS = new MetricsRegistry();
+
+  /**
+   * @param cb
+   * @return String rendering of the passed cached block's metadata.
+   */
+  public static String toString(final CachedBlock cb) {
+    return "filename=" + cb.getFilename() + ", " + toStringMinusFileName(cb);
+  }
+
+  /**
+   * @param cb
+   * @return String rendering of the passed cached block's metadata, minus the filename.
+   */
+  public static String toStringMinusFileName(final CachedBlock cb) {
+    return "offset=" + cb.getOffset() +
+      ", size=" + cb.getSize() +
+      ", age=" + cb.getCachedTime() +
+      ", type=" + cb.getBlockType() +
+      ", priority=" + cb.getBlockPriority();
+  }
+
+  /**
+   * Use one of these to keep a running account of cached blocks by file.  Throw it away when done.
+   * This is different than metrics in that it is stats on current state of a cache.  TODO: move
+   * some of these counts out to metrics/stats.
+   */
+  static class CachedBlocksByFile {
+    private int count;
+    private int dataBlockCount;
+    private long size;
+    private long dataSize;
+
+    /**
+     * Map by filename.  Use the concurrent skip-list implementations so the map and the
+     * per-file sets of blocks stay sorted.
+     */
+    private NavigableMap<String, NavigableSet<CachedBlock>> cachedBlockByFile =
+      new ConcurrentSkipListMap<String, NavigableSet<CachedBlock>>();
+    Histogram age = METRICS.newHistogram(CachedBlocksByFile.class, "age");
+
+    void update(final CachedBlock cb) {
+      NavigableSet<CachedBlock> set = this.cachedBlockByFile.get(cb.getFilename());
+      if (set == null) {
+        set = new ConcurrentSkipListSet<CachedBlock>();
+        this.cachedBlockByFile.put(cb.getFilename(), set);
+      }
+      set.add(cb);
+      this.size += cb.getSize();
+      this.count++;
+      BlockType bt = cb.getBlockType();
+      if (bt != null && bt.isData()) {
+        this.dataBlockCount++;
+        this.dataSize += cb.getSize();
+      }
+      this.age.update(cb.getCachedTime());
+    }
+
+    public NavigableMap<String, NavigableSet<CachedBlock>> getCachedBlockStatsByFile() {
+      return this.cachedBlockByFile;
+    }
+
+    /**
+     * @return count of blocks in the cache
+     */
+    public int getCount() {
+      return count;
+    }
+
+    /**
+     * @return size of blocks in the cache
+     */
+    public long getSize() {
+      return size;
+    }
+
+    /**
+     * @return Size of data.
+     */
+    public long getDataSize() {
+      return dataSize;
+    }
+
+    public Histogram getAgeHistogram() {
+      return this.age;
+    }
+
+    @Override
+    public String toString() {
+      Snapshot snapshot = this.age.getSnapshot();
+      return "count=" + count + ", dataBlockCount=" + this.dataBlockCount + ", size=" + size +
+        ", dataSize=" + getDataSize() +
+        ", mean age=" + this.age.mean() + ", stddev age=" + this.age.stdDev() +
+        ", min age=" + this.age.min() + ", max age=" + this.age.max() +
+        ", 95th percentile age=" + snapshot.get95thPercentile() +
+        ", 99th percentile age=" + snapshot.get99thPercentile();
+    }
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheColumnFamilySummary.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheColumnFamilySummary.java
deleted file mode 100644
index a45dad0..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheColumnFamilySummary.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.io.hfile;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.hbase.SmallTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests the BlockCacheColumnFamilySummary class
- *
- */
-@Category(SmallTests.class)
-public class TestBlockCacheColumnFamilySummary {
-
-
-  /**
-   *
-   */
-  @Test
-  public void testEquals() {
-
-    BlockCacheColumnFamilySummary e1 = new BlockCacheColumnFamilySummary();
-    e1.setTable("table1");
-    e1.setColumnFamily("cf1");
-
-    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary();
-    e2.setTable("table1");
-    e2.setColumnFamily("cf1");
-
-    assertEquals("bcse", e1, e2);
-  }
-
-  /**
-   *
-   */
-  @Test
-  public void testNotEquals() {
-
-    BlockCacheColumnFamilySummary e1 = new BlockCacheColumnFamilySummary();
-    e1.setTable("table1");
-    e1.setColumnFamily("cf1");
-
-    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary();
-    e2.setTable("tablexxxxxx");
-    e2.setColumnFamily("cf1");
-
-    assertTrue("bcse", ! e1.equals(e2));
-  }
-
-  /**
-   *
-   */
-  @Test
-  public void testMapLookup() {
-
-    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
-      new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
-
-    BlockCacheColumnFamilySummary e1 = new BlockCacheColumnFamilySummary("table1","cf1");
-
-    BlockCacheColumnFamilySummary lookup = bcs.get(e1);
-
-    if (lookup == null) {
-      lookup = BlockCacheColumnFamilySummary.create(e1);
-      bcs.put(e1,lookup);
-      lookup.incrementBlocks();
-      lookup.incrementHeapSize(100L);
-    }
-
-    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary("table1","cf1");
-
-    BlockCacheColumnFamilySummary l2 = bcs.get(e2);
-    assertEquals("blocks",1,l2.getBlocks());
-    assertEquals("heap",100L,l2.getHeapSize());
-  }
-
-  /**
-   *
-   */
-  @Test
-  public void testMapEntry() {
-
-    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
-      new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
-
-    BlockCacheColumnFamilySummary e1 = new BlockCacheColumnFamilySummary("table1","cf1");
-    bcs.put(e1, e1);
-
-    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary("table1","cf1");
-    bcs.put(e2, e2);
-
-    BlockCacheColumnFamilySummary e3 = new BlockCacheColumnFamilySummary("table1","cf1");
-    bcs.put(e3, e3);
-
-    assertEquals("mapSize",1,bcs.size());
-  }
-
-
-}
-
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
new file mode 100644
index 0000000..aa92a36
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import static org.junit.Assert.*;
+
+import java.util.Map;
+import java.util.NavigableSet;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.SmallTests;
+import org.apache.hadoop.hbase.io.hfile.TestCacheConfig.DataCacheEntry;
+import org.apache.hadoop.hbase.io.hfile.TestCacheConfig.IndexCacheEntry;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestBlockCacheReporting {
+  private static final Log LOG = LogFactory.getLog(TestBlockCacheReporting.class);
+  private Configuration conf;
+
+  @Before
+  public void setUp() throws Exception {
+    CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
+    this.conf = HBaseConfiguration.create();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    // Let go of current block cache.
+    CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null;
+  }
+
+  private void addDataAndHits(final BlockCache bc, final int count) {
+    Cacheable dce = new DataCacheEntry();
+    Cacheable ice = new IndexCacheEntry();
+    for (int i = 0; i < count; i++) {
+      BlockCacheKey bckd = new BlockCacheKey("f", i);
+      BlockCacheKey bcki = new BlockCacheKey("f", i + count);
+      bc.getBlock(bckd, true, false, true);
+      bc.cacheBlock(bckd, dce);
+      bc.cacheBlock(bcki, ice);
+      bc.getBlock(bckd, true, false, true);
+      bc.getBlock(bcki, true, false, true);
+    }
+    assertEquals(2 * count /*Data and Index blocks*/, bc.getStats().getHitCount());
+  }
+
+  @Test
+  public void testSlabCacheConfig() {
+    this.conf.setFloat(CacheConfig.SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0.1f);
+    CacheConfig cc = new CacheConfig(this.conf);
+    assertTrue(cc.getBlockCache() instanceof DoubleBlockCache);
+    logPerBlock(cc.getBlockCache());
+    final int count = 3;
+    addDataAndHits(cc.getBlockCache(), count);
+    LOG.info(cc.getBlockCache().getStats());
+    CachedBlockUtil.CachedBlocksByFile cbsbf = logPerBlock(cc.getBlockCache());
+    LOG.info(cbsbf);
+    logPerFile(cbsbf);
+  }
+
+  @Test
+  public void testBucketCache() {
+    this.conf.set(CacheConfig.BUCKET_CACHE_IOENGINE_KEY, "offheap");
+    this.conf.setInt(CacheConfig.BUCKET_CACHE_SIZE_KEY, 100);
+    this.conf.setFloat(CacheConfig.BUCKET_CACHE_COMBINED_PERCENTAGE_KEY, 0.8f);
+    CacheConfig cc = new CacheConfig(this.conf);
+    assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
+    logPerBlock(cc.getBlockCache());
+    final int count = 3;
+    addDataAndHits(cc.getBlockCache(), count);
+    LOG.info(cc.getBlockCache().getStats());
+    CachedBlockUtil.CachedBlocksByFile cbsbf = logPerBlock(cc.getBlockCache());
+    LOG.info(cbsbf);
+    logPerFile(cbsbf);
+  }
+
+  @Test
+  public void testLruBlockCache() {
+    CacheConfig cc = new CacheConfig(this.conf);
+    assertTrue(cc.isBlockCacheEnabled());
+    assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
+    assertTrue(cc.getBlockCache() instanceof LruBlockCache);
+    logPerBlock(cc.getBlockCache());
+    addDataAndHits(cc.getBlockCache(), 3);
+    BlockCache bc = cc.getBlockCache();
+    LOG.info("count=" + bc.getBlockCount() + ", currentSize=" + bc.getCurrentSize() +
+      ", freeSize=" + bc.getFreeSize() +
+      ", evictedCount=" + bc.getEvictedCount());
+    LOG.info(cc.getBlockCache().getStats());
+    CachedBlockUtil.CachedBlocksByFile cbsbf = logPerBlock(cc.getBlockCache());
+    LOG.info(cbsbf);
+    logPerFile(cbsbf);
+  }
+
+  private void logPerFile(final CachedBlockUtil.CachedBlocksByFile cbsbf) {
+    for (Map.Entry<String, NavigableSet<CachedBlock>> e:
+        cbsbf.getCachedBlockStatsByFile().entrySet()) {
+      int count = 0;
+      long size = 0;
+      int countData = 0;
+      long sizeData = 0;
+      for (CachedBlock cb: e.getValue()) {
+        count++;
+        size += cb.getSize();
+        BlockType bt = cb.getBlockType();
+        if (bt != null && bt.isData()) {
+          countData++;
+          sizeData += cb.getSize();
+        }
+      }
+      LOG.info("filename=" + e.getKey() + ", count=" + count + ", countData=" + countData +
+        ", size=" + size + ", sizeData=" + sizeData);
+    }
+  }
+
+  private CachedBlockUtil.CachedBlocksByFile logPerBlock(final BlockCache bc) {
+    CachedBlockUtil.CachedBlocksByFile cbsbf = new CachedBlockUtil.CachedBlocksByFile();
+    for (CachedBlock cb: bc) {
+      LOG.info(cb.toString());
+      cbsbf.update(cb);
+    }
+    return cbsbf;
+  }
+}
\ No newline at end of file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index 919fa9b..b9d6e66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -24,6 +24,8 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.SmallTests;
@@ -31,38 +33,65 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.mortbay.log.Log;
 
 /**
  * Tests that {@link CacheConfig} does as expected.
  */
 @Category(SmallTests.class)
 public class TestCacheConfig {
+  private static final Log LOG = LogFactory.getLog(TestCacheConfig.class);
   private Configuration conf;
 
+  static class Deserializer implements CacheableDeserializer<Cacheable> {
+    private final Cacheable cacheable;
+
+    Deserializer(final Cacheable c) {
+      this.cacheable = c;
+    }
+
+    @Override
+    public int getDeserialiserIdentifier() {
+      return 0;
+    }
+
+    @Override
+    public Cacheable deserialize(ByteBuffer b, boolean reuse) throws IOException {
+      LOG.info("Deserialized " + b + ", reuse=" + reuse);
+      return cacheable;
+    }
+
+    @Override
+    public Cacheable deserialize(ByteBuffer b) throws IOException {
+      LOG.info("Deserialized " + b);
+      return cacheable;
+    }
+  }
+
+  static class IndexCacheEntry extends DataCacheEntry {
+    private static IndexCacheEntry SINGLETON = new IndexCacheEntry();
+
+    public IndexCacheEntry() {
+      super(SINGLETON);
+    }
+
+    @Override
+    public BlockType getBlockType() {
+      return BlockType.ROOT_INDEX;
+    }
+  }
+
   static class DataCacheEntry implements Cacheable {
     private static final int SIZE = 1;
     private static DataCacheEntry SINGLETON = new DataCacheEntry();
+    final CacheableDeserializer<Cacheable> deserializer;
 
-    private final CacheableDeserializer<Cacheable> deserializer =
-      new CacheableDeserializer<Cacheable>() {
-      @Override
-      public int getDeserialiserIdentifier() {
-        return 0;
-      }
-
-      @Override
-      public Cacheable deserialize(ByteBuffer b, boolean reuse) throws IOException {
-        Log.info("Deserialized " + b + ", reuse=" + reuse);
-        return SINGLETON;
-      }
-
-      @Override
-      public Cacheable deserialize(ByteBuffer b) throws IOException {
-        Log.info("Deserialized " + b);
-        return SINGLETON;
-      }
-    };
+    DataCacheEntry() {
+      this(SINGLETON);
+    }
+
+    DataCacheEntry(final Cacheable c) {
+      this.deserializer = new Deserializer(c);
+    }
 
     public String toString() {
       return "size=" + SIZE + ", type=" + getBlockType();
@@ -80,7 +109,7 @@ public class TestCacheConfig {
 
     @Override
     public void serialize(ByteBuffer destination) {
-      Log.info("Serialized " + this + " to " + destination);
+      LOG.info("Serialized " + this + " to " + destination);
     }
 
     @Override
@@ -119,7 +148,7 @@ public class TestCacheConfig {
    * to onheap and offheap caches.
    * @param sizing True if we should run sizing test (doesn't always apply).
   */
-  private void basicBlockCacheOps(final CacheConfig cc, final boolean doubling,
+  void basicBlockCacheOps(final CacheConfig cc, final boolean doubling,
       final boolean sizing) {
     assertTrue(cc.isBlockCacheEnabled());
     assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
index d7f9cbb..1bec7f4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCachedBlockQueue.java
@@ -40,7 +40,7 @@ public class TestCachedBlockQueue extends TestCase {
     CachedBlock cb9 = new CachedBlock(1000, "cb9", 9);
     CachedBlock cb10 = new CachedBlock(1500, "cb10", 10);
 
-    CachedBlockQueue queue = new CachedBlockQueue(10000,1000);
+    LruCachedBlockQueue queue = new LruCachedBlockQueue(10000,1000);
 
     queue.add(cb1);
     queue.add(cb2);
@@ -78,7 +78,7 @@ public class TestCachedBlockQueue extends TestCase {
     CachedBlock cb9 = new CachedBlock(1000, "cb9", 9);
     CachedBlock cb10 = new CachedBlock(1500, "cb10", 10);
 
-    CachedBlockQueue queue = new CachedBlockQueue(10000,1000);
+    LruCachedBlockQueue queue = new LruCachedBlockQueue(10000,1000);
 
     queue.add(cb1);
     queue.add(cb2);
@@ -110,7 +110,7 @@ public class TestCachedBlockQueue extends TestCase {
     }
   }
 
-  private static class CachedBlock extends org.apache.hadoop.hbase.io.hfile.CachedBlock
+  private static class CachedBlock extends org.apache.hadoop.hbase.io.hfile.LruCachedBlock
   {
     public CachedBlock(final long heapSize, String name, long accessTime) {
       super(new BlockCacheKey(name, 0),
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
index d0d27b3..de841e4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLruBlockCache.java
@@ -720,7 +720,7 @@ public class TestLruBlockCache {
       (numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
       (LruBlockCache.DEFAULT_CONCURRENCY_LEVEL * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
     long negateBlockSize = (long)(totalOverhead/numEntries);
-    negateBlockSize += CachedBlock.PER_BLOCK_OVERHEAD;
+    negateBlockSize += LruCachedBlock.PER_BLOCK_OVERHEAD;
     return ClassSize.align((long)Math.floor((roughBlockSize - negateBlockSize)*0.99f));
   }
 
@@ -732,7 +732,7 @@ public class TestLruBlockCache {
       (numEntries * ClassSize.CONCURRENT_HASHMAP_ENTRY) +
       (LruBlockCache.DEFAULT_CONCURRENCY_LEVEL * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
     long negateBlockSize = totalOverhead / numEntries;
-    negateBlockSize += CachedBlock.PER_BLOCK_OVERHEAD;
+    negateBlockSize += LruCachedBlock.PER_BLOCK_OVERHEAD;
     return ClassSize.align((long)Math.floor((roughBlockSize - negateBlockSize)*
       LruBlockCache.DEFAULT_ACCEPTABLE_FACTOR));
   }
@@ -754,7 +754,7 @@ public class TestLruBlockCache {
 
   /** Size of the cache block holding this item. Used for verification.
   */
   public long cacheBlockHeapSize() {
-    return CachedBlock.PER_BLOCK_OVERHEAD
+    return LruCachedBlock.PER_BLOCK_OVERHEAD
       + ClassSize.align(cacheKey.heapSize())
       + ClassSize.align(size);
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 77889d5..12eff6c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SmallTests;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
+import org.apache.hadoop.hbase.io.hfile.CachedBlock;
 import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
 import org.apache.hadoop.hbase.io.hfile.CacheStats;
 import org.apache.hadoop.hbase.io.hfile.Cacheable;
@@ -332,7 +332,11 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf)
-      throws IOException {
+    public java.util.Iterator<CachedBlock> iterator() {
+      return null;
+    }
+
+    @Override
+    public BlockCache[] getBlockCaches() {
       return null;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java
deleted file mode 100644
index 59dab69..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileBlockCacheSummary.java
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.HTableUtil;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Row;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests the block cache summary functionality in StoreFile,
- * which contains the BlockCache
- *
- */
-@Category(MediumTests.class)
-public class TestStoreFileBlockCacheSummary {
-  final Log LOG = LogFactory.getLog(getClass());
-  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static final String TEST_TABLE = "testTable";
-  private static final String TEST_TABLE2 = "testTable2";
-  private static final String TEST_CF = "testFamily";
-  private static byte [] FAMILY = Bytes.toBytes(TEST_CF);
-  private static byte [] QUALIFIER = Bytes.toBytes("testQualifier");
-  private static byte [] VALUE = Bytes.toBytes("testValue");
-
-  private final int TOTAL_ROWS = 4;
-
-  /**
-   * @throws java.lang.Exception exception
-   */
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniCluster();
-  }
-
-  /**
-   * @throws java.lang.Exception exception
-   */
-  @AfterClass
-  public static void tearDownAfterClass() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-
-  private Put createPut(byte[] family, String row) {
-    Put put = new Put( Bytes.toBytes(row));
-    put.add(family, QUALIFIER, VALUE);
-    return put;
-  }
-
-  /**
-   * This test inserts data into multiple tables and then reads both tables to ensure
-   * they are in the block cache.
-   *
-   * @throws Exception exception
-   */
-  @Test
-  public void testBlockCacheSummary() throws Exception {
-    HTable ht = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE), FAMILY);
-    addRows(ht, FAMILY);
-
-    HTable ht2 = TEST_UTIL.createTable(Bytes.toBytes(TEST_TABLE2), FAMILY);
-    addRows(ht2, FAMILY);
-
-    TEST_UTIL.flush();
-
-    scan(ht, FAMILY);
-    scan(ht2, FAMILY);
-
-    BlockCache bc =
-      new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache();
-    List<BlockCacheColumnFamilySummary> bcs =
-      bc.getBlockCacheColumnFamilySummaries(TEST_UTIL.getConfiguration());
-    LOG.info("blockCacheSummary: " + bcs);
-
-    assertTrue("blockCache summary has " + bcs.size() + " entries", bcs.size() >= 2);
-
-    BlockCacheColumnFamilySummary e = bcs.get(bcs.size()-2);
-    assertEquals("table", TEST_TABLE, e.getTable());
-    assertEquals("cf", TEST_CF, e.getColumnFamily());
-
-    e = bcs.get(bcs.size()-1);
-    assertEquals("table", TEST_TABLE2, e.getTable());
-    assertEquals("cf", TEST_CF, e.getColumnFamily());
-
-    ht.close();
-    ht2.close();
-  }
-
-  private void addRows(HTable ht, byte[] family) throws IOException {
-
-    List<Row> rows = new ArrayList<Row>();
-    for (int i = 0; i < TOTAL_ROWS;i++) {
-      rows.add(createPut(family, "row" + i));
-    }
-
-    HTableUtil.bucketRsBatch( ht, rows);
-  }
-
-  private void scan(HTable ht, byte[] family) throws IOException {
-    Scan scan = new Scan();
-    scan.addColumn(family, QUALIFIER);
-
-    int count = 0;
-    for(@SuppressWarnings("unused") Result result : ht.getScanner(scan)) {
-      count++;
-    }
-    if (TOTAL_ROWS != count) {
-      throw new IOException("Incorrect number of rows!");
-    }
-  }
-
-}
-
diff --git a/pom.xml b/pom.xml
index e7079d3..e6172d0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -910,7 +910,7 @@
     3.2.0
     3.2.1
     3.1
-    <metrics-core.version>2.1.2</metrics-core.version>
+    <metrics-core.version>2.2.0</metrics-core.version>
    12.0.1
    1.8.8
    5.5.23