current;
+
+ BlockCachesIterator(final BlockCache [] blockCaches) {
+ this.bcs = blockCaches;
+ this.current = this.bcs[this.index].iterator();
+ }
+
+ @Override
+ public boolean hasNext() {
+ if (current.hasNext()) return true;
+ this.index++;
+ if (this.index >= this.bcs.length) return false;
+ this.current = this.bcs[this.index].iterator();
+ return hasNext();
+ }
+
+ @Override
+ public CachedBlock next() {
+ return this.current.next();
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+}
\ No newline at end of file
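
BlockCachesIterator flattens the per-cache iterators of a composite cache into a single stream: when the current iterator runs dry, hasNext() advances to the next cache in the array and recurses, so empty caches are skipped transparently. A standalone sketch of the same chaining pattern over plain collections (hypothetical names, not part of the patch):

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    // Chains the iterators of several sources, advancing lazily the way
    // BlockCachesIterator does over BlockCache instances.
    class ChainedIterator<T> implements Iterator<T> {
      private final Iterator<? extends Iterable<T>> sources;
      private Iterator<T> current;

      ChainedIterator(List<? extends Iterable<T>> sources) {
        this.sources = sources.iterator();
        this.current = this.sources.hasNext()? this.sources.next().iterator(): null;
      }

      @Override
      public boolean hasNext() {
        if (current == null) return false;
        if (current.hasNext()) return true;
        // Move to the next source and retry, mirroring the recursion above.
        current = sources.hasNext()? sources.next().iterator(): null;
        return hasNext();
      }

      @Override
      public T next() { return current.next(); }

      @Override
      public void remove() { throw new UnsupportedOperationException(); }

      public static void main(String[] args) {
        Iterator<Integer> it = new ChainedIterator<Integer>(
            Arrays.asList(Arrays.asList(1, 2), Arrays.<Integer>asList(), Arrays.asList(3)));
        while (it.hasNext()) System.out.println(it.next()); // prints 1 2 3
      }
    }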
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java
new file mode 100644
index 0000000..169b3ff
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockPriority.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+public enum BlockPriority {
+ /**
+ * Accessed a single time (used for scan-resistance)
+ */
+ SINGLE,
+ /**
+ * Accessed multiple times
+ */
+ MULTI,
+ /**
+ * Block from in-memory store
+ */
+ MEMORY
+}
\ No newline at end of file
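
The promotion rule tied to these priorities lives in the cache entries themselves (see LruCachedBlock.access() further down): a block enters as SINGLE, a second touch moves it to MULTI, and MEMORY is fixed at insertion for in-memory column families. A compact sketch of that transition, assuming only this enum:

    // Promotion on re-access, as LruCachedBlock#access does; MEMORY never moves.
    static BlockPriority onAccess(BlockPriority p) {
      return p == BlockPriority.SINGLE? BlockPriority.MULTI: p;
    }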
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 4807a4d..de6aec7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -458,6 +458,8 @@ public class CacheConfig {
long lruCacheSize = (long) (mu.getMax() * cachePercentage);
int blockSize = conf.getInt("hbase.offheapcache.minblocksize", HConstants.DEFAULT_BLOCKSIZE);
long slabCacheOffHeapCacheSize =
+ conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0) == 0?
+ 0:
(long) (conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, (float) 0) *
DirectMemoryUtils.getDirectMemorySize());
if (slabCacheOffHeapCacheSize <= 0) {
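
The added guard makes the disabled case explicit: when the off-heap percentage is unset (0), the size is pinned to 0 rather than multiplied against the direct-memory limit. A standalone sketch of the computation with stand-in values (DirectMemoryUtils is the real HBase helper; the numbers here are assumptions):

    float offHeapPct = 0.0f;                      // conf.getFloat(SLAB_CACHE_OFFHEAP_PERCENTAGE_KEY, 0)
    long directMemory = 2L * 1024 * 1024 * 1024;  // stand-in for DirectMemoryUtils.getDirectMemorySize()
    long slabCacheOffHeapCacheSize =
        offHeapPct == 0? 0: (long) (offHeapPct * directMemory);
    // <= 0 means the slab cache is skipped and only the on-heap LRU cache is configured.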
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
index 9f51b9e..4674eff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
*/
@InterfaceAudience.Private
public class CacheStats {
-
/** Sliding window statistics. The number of metric periods to include in
* sliding window hit ratio calculations.
*/
@@ -35,6 +34,7 @@ public class CacheStats {
/** The number of getBlock requests that were cache hits */
private final AtomicLong hitCount = new AtomicLong(0);
+
/**
* The number of getBlock requests that were cache hits, but only from
* requests that were set to use the block cache. This is because all reads
@@ -42,15 +42,19 @@ public class CacheStats {
* into the block cache. See HBASE-2253 for more information.
*/
private final AtomicLong hitCachingCount = new AtomicLong(0);
+
/** The number of getBlock requests that were cache misses */
private final AtomicLong missCount = new AtomicLong(0);
+
/**
* The number of getBlock requests that were cache misses, but only from
* requests that were set to use the block cache.
*/
private final AtomicLong missCachingCount = new AtomicLong(0);
+
/** The number of times an eviction has occurred */
private final AtomicLong evictionCount = new AtomicLong(0);
+
/** The total number of blocks that have been evicted */
private final AtomicLong evictedBlockCount = new AtomicLong(0);
@@ -89,8 +93,10 @@ public class CacheStats {
@Override
public String toString() {
- return "hitCount=" + this.hitCount + ", hitCachingCount=" + this.hitCachingCount +
- ", missCount=" + this.missCount;
+ return "hitCount=" + getHitCount() + ", hitCachingCount=" + getHitCachingCount() +
+ ", missCount=" + getMissCount() + ", missCachingCount=" + getMissCachingCount() +
+ ", evictionCount=" + getEvictionCount() +
+ ", evictedBlockCount=" + getEvictedCount();
}
public void miss(boolean caching) {
@@ -217,4 +223,4 @@ public class CacheStats {
}
return zeros;
}
-}
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
index 24350ee..541a2a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlock.java
@@ -1,5 +1,4 @@
/**
- *
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -19,110 +18,13 @@
package org.apache.hadoop.hbase.io.hfile;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ClassSize;
-/**
- * Represents an entry in the {@link LruBlockCache}.
- *
- * Makes the block memory-aware with {@link HeapSize} and Comparable
- * to sort by access time for the LRU. It also takes care of priority by
- * either instantiating as in-memory or handling the transition from single
- * to multiple access.
- */
@InterfaceAudience.Private
-public class CachedBlock implements HeapSize, Comparable<CachedBlock> {
-
- public final static long PER_BLOCK_OVERHEAD = ClassSize.align(
- ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (2 * Bytes.SIZEOF_LONG) +
- ClassSize.STRING + ClassSize.BYTE_BUFFER);
-
- static enum BlockPriority {
- /**
- * Accessed a single time (used for scan-resistance)
- */
- SINGLE,
- /**
- * Accessed multiple times
- */
- MULTI,
- /**
- * Block from in-memory store
- */
- MEMORY
- };
-
- private final BlockCacheKey cacheKey;
- private final Cacheable buf;
- private volatile long accessTime;
- private long size;
- private BlockPriority priority;
-
- public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime) {
- this(cacheKey, buf, accessTime, false);
- }
-
- public CachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime,
- boolean inMemory) {
- this.cacheKey = cacheKey;
- this.buf = buf;
- this.accessTime = accessTime;
- // We approximate the size of this class by the size of its name string
- // plus the size of its byte buffer plus the overhead associated with all
- // the base classes. We also include the base class
- // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with
- // their buffer lengths. This variable is used elsewhere in unit tests.
- this.size = ClassSize.align(cacheKey.heapSize())
- + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD;
- if(inMemory) {
- this.priority = BlockPriority.MEMORY;
- } else {
- this.priority = BlockPriority.SINGLE;
- }
- }
-
- /**
- * Block has been accessed. Update its local access time.
- */
- public void access(long accessTime) {
- this.accessTime = accessTime;
- if(this.priority == BlockPriority.SINGLE) {
- this.priority = BlockPriority.MULTI;
- }
- }
-
- public long heapSize() {
- return size;
- }
-
- @Override
- public int compareTo(CachedBlock that) {
- if(this.accessTime == that.accessTime) return 0;
- return this.accessTime < that.accessTime ? 1 : -1;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null || getClass() != obj.getClass()) {
- return false;
- }
- CachedBlock other = (CachedBlock) obj;
- return compareTo(other) == 0;
- }
-
- public Cacheable getBuffer() {
- return this.buf;
- }
-
- public BlockCacheKey getCacheKey() {
- return this.cacheKey;
- }
-
- public BlockPriority getPriority() {
- return this.priority;
- }
-}
+public interface CachedBlock extends Comparable<CachedBlock> {
+ BlockPriority getBlockPriority();
+ BlockType getBlockType();
+ long getOffset();
+ long getSize();
+ long getCachedTime();
+ String getFilename();
+}
\ No newline at end of file
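
The old concrete class (now LruCachedBlock, below) gives way to this read-only view, which each BlockCache implementation adapts its internal entry type to. For illustration, a minimal stub such as a test might use; everything here is hypothetical except the interface itself:

    final class StubCachedBlock implements CachedBlock {
      private final String filename;
      private final long offset, size, cachedTime;

      StubCachedBlock(String filename, long offset, long size, long cachedTime) {
        this.filename = filename;
        this.offset = offset;
        this.size = size;
        this.cachedTime = cachedTime;
      }

      @Override public BlockPriority getBlockPriority() { return BlockPriority.SINGLE; }
      @Override public BlockType getBlockType() { return null; } // unknown, as in BucketCache's view
      @Override public long getOffset() { return offset; }
      @Override public long getSize() { return size; }
      @Override public long getCachedTime() { return cachedTime; }
      @Override public String getFilename() { return filename; }

      @Override
      public int compareTo(CachedBlock other) {
        // Long.signum sidesteps the int-cast overflow risk for large offsets.
        return Long.signum(this.offset - other.getOffset());
      }
    }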
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java
deleted file mode 100644
index d57b1f1..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CachedBlockQueue.java
+++ /dev/null
@@ -1,109 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.io.hfile;
-
-import com.google.common.collect.MinMaxPriorityQueue;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.io.HeapSize;
-
-/**
- * A memory-bound queue that will grow until an element brings
- * total size >= maxSize. From then on, only entries that are sorted larger
- * than the smallest current entry will be inserted/replaced.
- *
- * Use this when you want to find the largest elements (according to their
- * ordering, not their heap size) that consume as close to the specified
- * maxSize as possible. Default behavior is to grow just above rather than
- * just below specified max.
- *
- *
Object used in this queue must implement {@link HeapSize} as well as
- * {@link Comparable}.
- */
-@InterfaceAudience.Private
-public class CachedBlockQueue implements HeapSize {
-
- private MinMaxPriorityQueue<CachedBlock> queue;
-
- private long heapSize;
- private long maxSize;
-
- /**
- * @param maxSize the target size of elements in the queue
- * @param blockSize expected average size of blocks
- */
- public CachedBlockQueue(long maxSize, long blockSize) {
- int initialSize = (int)(maxSize / blockSize);
- if(initialSize == 0) initialSize++;
- queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
- heapSize = 0;
- this.maxSize = maxSize;
- }
-
- /**
- * Attempt to add the specified cached block to this queue.
- *
- * If the queue is smaller than the max size, or if the specified element
- * is ordered before the smallest element in the queue, the element will be
- * added to the queue. Otherwise, there is no side effect of this call.
- * @param cb block to try to add to the queue
- */
- public void add(CachedBlock cb) {
- if(heapSize < maxSize) {
- queue.add(cb);
- heapSize += cb.heapSize();
- } else {
- CachedBlock head = queue.peek();
- if(cb.compareTo(head) > 0) {
- heapSize += cb.heapSize();
- heapSize -= head.heapSize();
- if(heapSize > maxSize) {
- queue.poll();
- } else {
- heapSize += head.heapSize();
- }
- queue.add(cb);
- }
- }
- }
-
- /**
- * @return The next element in this queue, or {@code null} if the queue is
- * empty.
- */
- public CachedBlock poll() {
- return queue.poll();
- }
-
- /**
- * @return The last element in this queue, or {@code null} if the queue is
- * empty.
- */
- public CachedBlock pollLast() {
- return queue.pollLast();
- }
-
- /**
- * Total size of all elements in this queue.
- * @return size of all elements currently in queue, in bytes
- */
- public long heapSize() {
- return heapSize;
- }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index eb03fbd..7564cc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -18,11 +18,9 @@
*/
package org.apache.hadoop.hbase.io.hfile;
-import java.io.IOException;
-import java.util.List;
+import java.util.Iterator;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
@@ -119,21 +117,10 @@ public class CombinedBlockCache implements BlockCache, HeapSize {
}
@Override
- public long getEvictedCount() {
- return lruCache.getEvictedCount() + bucketCache.getEvictedCount();
- }
-
- @Override
public long getBlockCount() {
return lruCache.getBlockCount() + bucketCache.getBlockCount();
}
- @Override
- public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
- Configuration conf) throws IOException {
- throw new UnsupportedOperationException();
- }
-
private static class CombinedCacheStats extends CacheStats {
private final CacheStats lruCacheStats;
private final CacheStats bucketCacheStats;
@@ -208,4 +195,14 @@ public class CombinedBlockCache implements BlockCache, HeapSize {
return Double.isNaN(ratio) ? 0 : ratio;
}
}
-}
+
+ @Override
+ public Iterator<CachedBlock> iterator() {
+ return new BlockCachesIterator(getBlockCaches());
+ }
+
+ @Override
+ public BlockCache[] getBlockCaches() {
+ return new BlockCache [] {this.lruCache, this.bucketCache};
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
index da7a56e..558ca20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/DoubleBlockCache.java
@@ -18,8 +18,7 @@
*/
package org.apache.hadoop.hbase.io.hfile;
-import java.io.IOException;
-import java.util.List;
+import java.util.Iterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -153,10 +152,6 @@ public class DoubleBlockCache implements ResizableBlockCache, HeapSize {
return onHeapCache.getCurrentSize() + offHeapCache.getCurrentSize();
}
- public long getEvictedCount() {
- return onHeapCache.getEvictedCount() + offHeapCache.getEvictedCount();
- }
-
@Override
public int evictBlocksByHfileName(String hfileName) {
onHeapCache.evictBlocksByHfileName(hfileName);
@@ -165,12 +160,6 @@ public class DoubleBlockCache implements ResizableBlockCache, HeapSize {
}
@Override
- public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
- Configuration conf) throws IOException {
- return onHeapCache.getBlockCacheColumnFamilySummaries(conf);
- }
-
- @Override
public long getBlockCount() {
return onHeapCache.getBlockCount() + offHeapCache.getBlockCount();
}
@@ -179,4 +168,14 @@ public class DoubleBlockCache implements ResizableBlockCache, HeapSize {
public void setMaxSize(long size) {
this.onHeapCache.setMaxSize(size);
}
-}
+
+ @Override
+ public Iterator<CachedBlock> iterator() {
+ return new BlockCachesIterator(getBlockCaches());
+ }
+
+ @Override
+ public BlockCache[] getBlockCaches() {
+ return new BlockCache [] {this.onHeapCache, this.offHeapCache};
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index dead173..b6b8ad4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -18,13 +18,10 @@
*/
package org.apache.hadoop.hbase.io.hfile;
-import java.io.IOException;
import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Collections;
import java.util.EnumMap;
-import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
@@ -41,18 +38,16 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-import org.apache.hadoop.hbase.io.hfile.CachedBlock.BlockPriority;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import com.google.common.annotations.VisibleForTesting;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
@@ -94,6 +89,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
* to the relative sizes and usage.
*/
@InterfaceAudience.Private
+@JsonIgnoreProperties({"encodingCountsForTest"})
public class LruBlockCache implements ResizableBlockCache, HeapSize {
static final Log LOG = LogFactory.getLog(LruBlockCache.class);
@@ -132,7 +128,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
static final int statThreadPeriod = 60 * 5;
/** Concurrent map (the cache) */
- private final Map<BlockCacheKey, CachedBlock> map;
+ private final Map<BlockCacheKey, LruCachedBlock> map;
/** Eviction lock (locked when eviction in process) */
private final ReentrantLock evictionLock = new ReentrantLock(true);
@@ -272,7 +268,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
this.maxSize = maxSize;
this.blockSize = blockSize;
this.forceInMemory = forceInMemory;
- map = new ConcurrentHashMap<BlockCacheKey, CachedBlock>(mapInitialSize,
+ map = new ConcurrentHashMap<BlockCacheKey, LruCachedBlock>(mapInitialSize,
mapLoadFactor, mapConcurrencyLevel);
this.minFactor = minFactor;
this.acceptableFactor = acceptableFactor;
@@ -315,7 +311,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
*/
@Override
public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory) {
- CachedBlock cb = map.get(cacheKey);
+ LruCachedBlock cb = map.get(cacheKey);
if(cb != null) {
// compare the contents, if they are not equal, we are in big trouble
if (compare(buf, cb.getBuffer()) != 0) {
@@ -327,7 +323,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
LOG.warn(msg);
return;
}
- cb = new CachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
+ cb = new LruCachedBlock(cacheKey, buf, count.incrementAndGet(), inMemory);
long newSize = updateSizeMetrics(cb, false);
map.put(cacheKey, cb);
elements.incrementAndGet();
@@ -358,12 +354,12 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
/**
* Helper function that updates the local size counter and also updates any
* per-cf or per-blocktype metrics it can discern from given
- * {@link CachedBlock}
+ * {@link LruCachedBlock}
*
* @param cb
* @param evict
*/
- protected long updateSizeMetrics(CachedBlock cb, boolean evict) {
+ protected long updateSizeMetrics(LruCachedBlock cb, boolean evict) {
long heapsize = cb.heapSize();
if (evict) {
heapsize *= -1;
@@ -383,7 +379,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
- CachedBlock cb = map.get(cacheKey);
+ LruCachedBlock cb = map.get(cacheKey);
if (cb == null) {
if (!repeat && updateCacheMetrics) stats.miss(caching);
if (victimHandler != null) {
@@ -407,7 +403,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
- CachedBlock cb = map.get(cacheKey);
+ LruCachedBlock cb = map.get(cacheKey);
if (cb == null) return false;
evictBlock(cb, false);
return true;
@@ -446,7 +442,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
* EvictionThread
* @return the heap size of evicted block
*/
- protected long evictBlock(CachedBlock block, boolean evictedByEvictionProcess) {
+ protected long evictBlock(LruCachedBlock block, boolean evictedByEvictionProcess) {
map.remove(block.getCacheKey());
updateSizeMetrics(block, true);
elements.decrementAndGet();
@@ -501,7 +497,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
memorySize());
// Scan entire map putting into appropriate buckets
- for(CachedBlock cachedBlock : map.values()) {
+ for(LruCachedBlock cachedBlock : map.values()) {
switch(cachedBlock.getPriority()) {
case SINGLE: {
bucketSingle.add(cachedBlock);
@@ -597,23 +593,23 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
*/
private class BlockBucket implements Comparable<BlockBucket> {
- private CachedBlockQueue queue;
+ private LruCachedBlockQueue queue;
private long totalSize = 0;
private long bucketSize;
public BlockBucket(long bytesToFree, long blockSize, long bucketSize) {
this.bucketSize = bucketSize;
- queue = new CachedBlockQueue(bytesToFree, blockSize);
+ queue = new LruCachedBlockQueue(bytesToFree, blockSize);
totalSize = 0;
}
- public void add(CachedBlock block) {
+ public void add(LruCachedBlock block) {
totalSize += block.heapSize();
queue.add(block);
}
public long free(long toFree) {
- CachedBlock cb;
+ LruCachedBlock cb;
long freedBytes = 0;
while ((cb = queue.pollLast()) != null) {
freedBytes += evictBlock(cb, true);
@@ -668,7 +664,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
@Override
public long size() {
- return this.elements.get();
+ return getMaxSize();
}
@Override
@@ -676,18 +672,6 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
return this.elements.get();
}
- /**
- * Get the number of eviction runs that have occurred
- */
- public long getEvictionCount() {
- return this.stats.getEvictionCount();
- }
-
- @Override
- public long getEvictedCount() {
- return this.stats.getEvictedCount();
- }
-
EvictionThread getEvictionThread() {
return this.evictionThread;
}
@@ -812,36 +796,68 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
}
@Override
- public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf) throws IOException {
-
- Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
- FileSystem.get(conf),
- FSUtils.getRootDir(conf));
-
- // quirky, but it's a compound key and this is a shortcut taken instead of
- // creating a class that would represent only a key.
- Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs =
- new HashMap<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary>();
-
- for (CachedBlock cb : map.values()) {
- String sf = cb.getCacheKey().getHfileName();
- Path path = sfMap.get(sf);
- if ( path != null) {
- BlockCacheColumnFamilySummary lookup =
- BlockCacheColumnFamilySummary.createFromStoreFilePath(path);
- BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
- if (bcse == null) {
- bcse = BlockCacheColumnFamilySummary.create(lookup);
- bcs.put(lookup,bcse);
- }
- bcse.incrementBlocks();
- bcse.incrementHeapSize(cb.heapSize());
+ public Iterator<CachedBlock> iterator() {
+ final Iterator<LruCachedBlock> iterator = map.values().iterator();
+
+ return new Iterator<CachedBlock>() {
+ private final long now = System.nanoTime();
+
+ @Override
+ public boolean hasNext() {
+ return iterator.hasNext();
}
- }
- List<BlockCacheColumnFamilySummary> list =
- new ArrayList<BlockCacheColumnFamilySummary>(bcs.values());
- Collections.sort( list );
- return list;
+
+ @Override
+ public CachedBlock next() {
+ final LruCachedBlock b = iterator.next();
+ return new CachedBlock() {
+ @Override
+ public String toString() {
+ return BlockCacheUtil.toString(this, now);
+ }
+
+ @Override
+ public BlockPriority getBlockPriority() {
+ return b.getPriority();
+ }
+
+ @Override
+ public BlockType getBlockType() {
+ return b.getBuffer().getBlockType();
+ }
+
+ @Override
+ public long getOffset() {
+ return b.getCacheKey().getOffset();
+ }
+
+ @Override
+ public long getSize() {
+ return b.getBuffer().heapSize();
+ }
+
+ @Override
+ public long getCachedTime() {
+ return b.getCachedTime();
+ }
+
+ @Override
+ public String getFilename() {
+ return b.getCacheKey().getHfileName();
+ }
+
+ @Override
+ public int compareTo(CachedBlock other) {
+ return (int)(other.getOffset() - this.getOffset());
+ }
+ };
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
}
// Simple calculators of sizes given factors and maxSize
@@ -902,10 +918,11 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
return fileNames;
}
+ @VisibleForTesting
Map<BlockType, Integer> getBlockTypeCountsForTest() {
Map<BlockType, Integer> counts =
new EnumMap<BlockType, Integer>(BlockType.class);
- for (CachedBlock cb : map.values()) {
+ for (LruCachedBlock cb : map.values()) {
BlockType blockType = ((HFileBlock) cb.getBuffer()).getBlockType();
Integer count = counts.get(blockType);
counts.put(blockType, (count == null ? 0 : count) + 1);
@@ -916,7 +933,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
public Map<DataBlockEncoding, Integer> getEncodingCountsForTest() {
Map<DataBlockEncoding, Integer> counts =
new EnumMap<DataBlockEncoding, Integer>(DataBlockEncoding.class);
- for (CachedBlock block : map.values()) {
+ for (LruCachedBlock block : map.values()) {
DataBlockEncoding encoding =
((HFileBlock) block.getBuffer()).getDataBlockEncoding();
Integer count = counts.get(encoding);
@@ -929,4 +946,9 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
assert victimHandler == null;
victimHandler = handler;
}
-}
+
+ @Override
+ public BlockCache[] getBlockCaches() {
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
new file mode 100644
index 0000000..a8186ef
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlock.java
@@ -0,0 +1,126 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ClassSize;
+
+/**
+ * Represents an entry in the {@link LruBlockCache}.
+ *
+ * Makes the block memory-aware with {@link HeapSize} and Comparable
+ * to sort by access time for the LRU. It also takes care of priority by
+ * either instantiating as in-memory or handling the transition from single
+ * to multiple access.
+ */
+@InterfaceAudience.Private
+public class LruCachedBlock implements HeapSize, Comparable<LruCachedBlock> {
+
+ public final static long PER_BLOCK_OVERHEAD = ClassSize.align(
+ ClassSize.OBJECT + (3 * ClassSize.REFERENCE) + (3 * Bytes.SIZEOF_LONG) +
+ ClassSize.STRING + ClassSize.BYTE_BUFFER);
+
+ private final BlockCacheKey cacheKey;
+ private final Cacheable buf;
+ private volatile long accessTime;
+ private long size;
+ private BlockPriority priority;
+ /**
+ * Time this block was cached. Presumes we are created just before we are added to the cache.
+ */
+ private final long cachedTime = System.nanoTime();
+
+ public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime) {
+ this(cacheKey, buf, accessTime, false);
+ }
+
+ public LruCachedBlock(BlockCacheKey cacheKey, Cacheable buf, long accessTime,
+ boolean inMemory) {
+ this.cacheKey = cacheKey;
+ this.buf = buf;
+ this.accessTime = accessTime;
+ // We approximate the size of this class by the size of its name string
+ // plus the size of its byte buffer plus the overhead associated with all
+ // the base classes. We also include the base class
+ // sizes in the PER_BLOCK_OVERHEAD variable rather than align()ing them with
+ // their buffer lengths. This variable is used elsewhere in unit tests.
+ this.size = ClassSize.align(cacheKey.heapSize())
+ + ClassSize.align(buf.heapSize()) + PER_BLOCK_OVERHEAD;
+ if(inMemory) {
+ this.priority = BlockPriority.MEMORY;
+ } else {
+ this.priority = BlockPriority.SINGLE;
+ }
+ }
+
+ /**
+ * Block has been accessed.
+ * @param accessTime Last access; this is actually an incremented sequence number rather than an
+ * actual time.
+ */
+ public void access(long accessTime) {
+ this.accessTime = accessTime;
+ if(this.priority == BlockPriority.SINGLE) {
+ this.priority = BlockPriority.MULTI;
+ }
+ }
+
+ /**
+ * @return Time we were cached at in nano seconds.
+ */
+ public long getCachedTime() {
+ return this.cachedTime;
+ }
+
+ public long heapSize() {
+ return size;
+ }
+
+ @Override
+ public int compareTo(LruCachedBlock that) {
+ if(this.accessTime == that.accessTime) return 0;
+ return this.accessTime < that.accessTime ? 1 : -1;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null || getClass() != obj.getClass()) {
+ return false;
+ }
+ LruCachedBlock other = (LruCachedBlock) obj;
+ return compareTo(other) == 0;
+ }
+
+ public Cacheable getBuffer() {
+ return this.buf;
+ }
+
+ public BlockCacheKey getCacheKey() {
+ return this.cacheKey;
+ }
+
+ public BlockPriority getPriority() {
+ return this.priority;
+ }
+}
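
One detail worth pausing on: compareTo() sorts the more recently accessed block first (accessTime is the cache's incrementing sequence number, not wall-clock time), so an ascending sort leaves the least recently used entries at the tail, where the eviction queue's pollLast() finds them. A small sketch of that contract, using assumed sequence numbers only:

    // thisAccess/thatAccess are LruBlockCache sequence numbers handed to access().
    long thisAccess = 100, thatAccess = 7;
    // Body of LruCachedBlock#compareTo: the newer (larger) access sorts first.
    int cmp = thisAccess == thatAccess? 0: (thisAccess < thatAccess? 1: -1);
    assert cmp == -1; // the recently touched block precedes the stale one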
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
new file mode 100644
index 0000000..f61aba6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruCachedBlockQueue.java
@@ -0,0 +1,109 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import com.google.common.collect.MinMaxPriorityQueue;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.io.HeapSize;
+
+/**
+ * A memory-bound queue that will grow until an element brings
+ * total size >= maxSize. From then on, only entries that are sorted larger
+ * than the smallest current entry will be inserted/replaced.
+ *
+ * Use this when you want to find the largest elements (according to their
+ * ordering, not their heap size) that consume as close to the specified
+ * maxSize as possible. Default behavior is to grow just above rather than
+ * just below specified max.
+ *
+ *
Object used in this queue must implement {@link HeapSize} as well as
+ * {@link Comparable}.
+ */
+@InterfaceAudience.Private
+public class LruCachedBlockQueue implements HeapSize {
+
+ private MinMaxPriorityQueue<LruCachedBlock> queue;
+
+ private long heapSize;
+ private long maxSize;
+
+ /**
+ * @param maxSize the target size of elements in the queue
+ * @param blockSize expected average size of blocks
+ */
+ public LruCachedBlockQueue(long maxSize, long blockSize) {
+ int initialSize = (int)(maxSize / blockSize);
+ if(initialSize == 0) initialSize++;
+ queue = MinMaxPriorityQueue.expectedSize(initialSize).create();
+ heapSize = 0;
+ this.maxSize = maxSize;
+ }
+
+ /**
+ * Attempt to add the specified cached block to this queue.
+ *
+ * If the queue is smaller than the max size, or if the specified element
+ * is ordered before the smallest element in the queue, the element will be
+ * added to the queue. Otherwise, there is no side effect of this call.
+ * @param cb block to try to add to the queue
+ */
+ public void add(LruCachedBlock cb) {
+ if(heapSize < maxSize) {
+ queue.add(cb);
+ heapSize += cb.heapSize();
+ } else {
+ LruCachedBlock head = queue.peek();
+ if(cb.compareTo(head) > 0) {
+ heapSize += cb.heapSize();
+ heapSize -= head.heapSize();
+ if(heapSize > maxSize) {
+ queue.poll();
+ } else {
+ heapSize += head.heapSize();
+ }
+ queue.add(cb);
+ }
+ }
+ }
+
+ /**
+ * @return The next element in this queue, or {@code null} if the queue is
+ * empty.
+ */
+ public LruCachedBlock poll() {
+ return queue.poll();
+ }
+
+ /**
+ * @return The last element in this queue, or {@code null} if the queue is
+ * empty.
+ */
+ public LruCachedBlock pollLast() {
+ return queue.pollLast();
+ }
+
+ /**
+ * Total size of all elements in this queue.
+ * @return size of all elements currently in queue, in bytes
+ */
+ public long heapSize() {
+ return heapSize;
+ }
+}
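
The add() logic keeps roughly maxSize bytes of the highest-ordered entries: while under budget everything is admitted; once full, a candidate only displaces the queue head if it sorts after it, and the head is actually dropped only when keeping both would overshoot maxSize. The same bookkeeping, sketched standalone over plain longs where each element's value doubles as its heap size (assumed setup; Guava on the classpath):

    import com.google.common.collect.MinMaxPriorityQueue;

    public class BoundedQueueSketch {
      public static void main(String[] args) {
        MinMaxPriorityQueue<Long> queue = MinMaxPriorityQueue.expectedSize(4).create();
        long heapSize = 0;
        final long maxSize = 100;
        for (long e : new long[] {40, 40, 40, 60}) {
          if (heapSize < maxSize) {
            queue.add(e);
            heapSize += e;
          } else {
            long head = queue.peek();      // smallest under natural ordering
            if (e > head) {
              heapSize += e - head;
              if (heapSize > maxSize) {
                queue.poll();              // evict the head to stay near maxSize
              } else {
                heapSize += head;          // head survives; restore its size
              }
              queue.add(e);
            }
          }
        }
        System.out.println(queue);         // retains the largest entries, ~maxSize total
      }
    }

As the javadoc says, the queue grows just above the budget rather than just below it, which is visible in the trace: the final 60 displaces a 40 only after the running size has already crossed maxSize.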
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index 8c1b58d..08cfa34 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -32,6 +32,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.BucketEntry;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
/**
* This class is used to allocate a block with specified size and free the block
@@ -42,10 +43,12 @@ import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.BucketEntry;
* This class is not thread safe.
*/
@InterfaceAudience.Private
+@JsonIgnoreProperties({"indexStatistics", "freeSize", "usedSize"})
public final class BucketAllocator {
static final Log LOG = LogFactory.getLog(BucketAllocator.class);
- final private static class Bucket {
+ @JsonIgnoreProperties({"completelyFree", "uninstantiated"})
+ public final static class Bucket {
private long baseOffset;
private int itemAllocationSize, sizeIndex;
private int itemCount;
@@ -77,7 +80,7 @@ public final class BucketAllocator {
return sizeIndex;
}
- public int itemAllocationSize() {
+ public int getItemAllocationSize() {
return itemAllocationSize;
}
@@ -97,15 +100,15 @@ public final class BucketAllocator {
return usedCount;
}
- public int freeBytes() {
+ public int getFreeBytes() {
return freeCount * itemAllocationSize;
}
- public int usedBytes() {
+ public int getUsedBytes() {
return usedCount * itemAllocationSize;
}
- public long baseOffset() {
+ public long getBaseOffset() {
return baseOffset;
}
@@ -372,19 +375,18 @@ public final class BucketAllocator {
}
realCacheSize.addAndGet(foundLen);
buckets[bucketNo].addAllocation(foundOffset);
- usedSize += buckets[bucketNo].itemAllocationSize();
+ usedSize += buckets[bucketNo].getItemAllocationSize();
bucketSizeInfos[bucketSizeIndex].blockAllocated(b);
}
}
- public String getInfo() {
+ public String toString() {
StringBuilder sb = new StringBuilder(1024);
for (int i = 0; i < buckets.length; ++i) {
Bucket b = buckets[i];
- sb.append(" Bucket ").append(i).append(": ").append(b.itemAllocationSize());
- sb.append(" freeCount=").append(b.freeCount()).append(" used=")
- .append(b.usedCount());
- sb.append('\n');
+ if (i > 0) sb.append(", ");
+ sb.append("bucket.").append(i).append(": size=").append(b.getItemAllocationSize());
+ sb.append(", freeCount=").append(b.freeCount()).append(", used=").append(b.usedCount());
}
return sb.toString();
}
@@ -441,8 +443,8 @@ public final class BucketAllocator {
assert bucketNo >= 0 && bucketNo < buckets.length;
Bucket targetBucket = buckets[bucketNo];
bucketSizeInfos[targetBucket.sizeIndex()].freeBlock(targetBucket, offset);
- usedSize -= targetBucket.itemAllocationSize();
- return targetBucket.itemAllocationSize();
+ usedSize -= targetBucket.getItemAllocationSize();
+ return targetBucket.getItemAllocationSize();
}
public int sizeIndexOfAllocation(long offset) {
@@ -456,7 +458,7 @@ public final class BucketAllocator {
int bucketNo = (int) (offset / bucketCapacity);
assert bucketNo >= 0 && bucketNo < buckets.length;
Bucket targetBucket = buckets[bucketNo];
- return targetBucket.itemAllocationSize();
+ return targetBucket.getItemAllocationSize();
}
static class IndexStatistics {
@@ -506,6 +508,10 @@ public final class BucketAllocator {
}
}
+ public Bucket [] getBuckets() {
+ return this.buckets;
+ }
+
public void dumpToLog() {
logStatistics();
StringBuilder sb = new StringBuilder();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 21dc6d7..e77bfc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -31,6 +31,7 @@ import java.io.Serializable;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Comparator;
+import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;
@@ -48,15 +49,17 @@ import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.io.hfile.BlockPriority;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
+import org.apache.hadoop.hbase.io.hfile.CachedBlock;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;
import org.apache.hadoop.hbase.util.ConcurrentIndex;
@@ -255,7 +258,7 @@ public class BucketCache implements BlockCache, HeapSize {
", capacity=" + StringUtils.byteDesc(capacity) +
", blockSize=" + StringUtils.byteDesc(blockSize) + ", writerThreadNum=" +
writerThreadNum + ", writerQLen=" + writerQLen + ", persistencePath=" +
- persistencePath);
+ persistencePath + ", bucketAllocator=" + this.bucketAllocator);
}
/**
@@ -376,8 +379,9 @@ public class BucketCache implements BlockCache, HeapSize {
if (lenRead != len) {
throw new RuntimeException("Only " + lenRead + " bytes read, " + len + " expected");
}
- Cacheable cachedBlock = bucketEntry.deserializerReference(
- deserialiserMap).deserialize(bb, true);
+ CacheableDeserializer<Cacheable> deserializer =
+ bucketEntry.deserializerReference(this.deserialiserMap);
+ Cacheable cachedBlock = deserializer.deserialize(bb, true);
long timeTaken = System.nanoTime() - start;
if (updateCacheMetrics) {
cacheStats.hit(caching);
@@ -897,7 +901,7 @@ public class BucketCache implements BlockCache, HeapSize {
return cacheStats;
}
- BucketAllocator getAllocator() {
+ public BucketAllocator getAllocator() {
return this.bucketAllocator;
}
@@ -926,11 +930,6 @@ public class BucketCache implements BlockCache, HeapSize {
return this.bucketAllocator.getUsedSize();
}
- @Override
- public long getEvictedCount() {
- return cacheStats.getEvictedCount();
- }
-
/**
* Evicts all blocks for a specific HFile.
*
@@ -957,28 +956,6 @@ public class BucketCache implements BlockCache, HeapSize {
return numEvicted;
}
-
- @Override
- public List getBlockCacheColumnFamilySummaries(
- Configuration conf) {
- throw new UnsupportedOperationException();
- }
-
- static enum BlockPriority {
- /**
- * Accessed a single time (used for scan-resistance)
- */
- SINGLE,
- /**
- * Accessed multiple times
- */
- MULTI,
- /**
- * Block from in-memory store
- */
- MEMORY
- };
-
/**
* Item in cache. We expect this to be where most memory goes. Java uses 8
* bytes just for object headers; after this, we want to use as little as
@@ -995,6 +972,10 @@ public class BucketCache implements BlockCache, HeapSize {
byte deserialiserIndex;
private volatile long accessTime;
private BlockPriority priority;
+ /**
+ * Time this block was cached. Presumes we are created just before we are added to the cache.
+ */
+ private final long cachedTime = System.nanoTime();
BucketEntry(long offset, int length, long accessTime, boolean inMemory) {
setOffset(offset);
@@ -1061,6 +1042,10 @@ public class BucketCache implements BlockCache, HeapSize {
public boolean equals(Object that) {
return this == that;
}
+
+ public long getCachedTime() {
+ return cachedTime;
+ }
}
/**
@@ -1198,4 +1183,76 @@ public class BucketCache implements BlockCache, HeapSize {
writerThread.join();
}
}
-}
+
+ @Override
+ public Iterator<CachedBlock> iterator() {
+ // Don't bother with ramcache since stuff is in here only a little while.
+ final Iterator<Map.Entry<BlockCacheKey, BucketEntry>> i =
+ this.backingMap.entrySet().iterator();
+ return new Iterator<CachedBlock>() {
+ private final long now = System.nanoTime();
+
+ @Override
+ public boolean hasNext() {
+ return i.hasNext();
+ }
+
+ @Override
+ public CachedBlock next() {
+ final Map.Entry<BlockCacheKey, BucketEntry> e = i.next();
+ return new CachedBlock() {
+ @Override
+ public String toString() {
+ return BlockCacheUtil.toString(this, now);
+ }
+
+ @Override
+ public BlockPriority getBlockPriority() {
+ return e.getValue().getPriority();
+ }
+
+ @Override
+ public BlockType getBlockType() {
+ // Not held by BucketEntry. Could add it if wanted on BucketEntry creation.
+ return null;
+ }
+
+ @Override
+ public long getOffset() {
+ return e.getKey().getOffset();
+ }
+
+ @Override
+ public long getSize() {
+ return e.getValue().getLength();
+ }
+
+ @Override
+ public long getCachedTime() {
+ return e.getValue().getCachedTime();
+ }
+
+ @Override
+ public String getFilename() {
+ return e.getKey().getHfileName();
+ }
+
+ @Override
+ public int compareTo(CachedBlock other) {
+ return (int)(this.getOffset() - other.getOffset());
+ }
+ };
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
+ }
+
+ @Override
+ public BlockCache[] getBlockCaches() {
+ return null;
+ }
+}
\ No newline at end of file
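
With iterator() now on every cache, reporting code can fold over CachedBlock views without knowing the concrete cache type. A hypothetical aggregation that sums cached bytes per HFile, assuming only the interfaces this patch introduces (and that BlockCache itself declares iterator(), as the @Override here implies):

    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    // 'cache' may be an LruBlockCache, a BucketCache, or a composite; all behave alike.
    static Map<String, Long> bytesPerFile(BlockCache cache) {
      Map<String, Long> totals = new HashMap<String, Long>();
      for (Iterator<CachedBlock> it = cache.iterator(); it.hasNext();) {
        CachedBlock cb = it.next();
        Long prev = totals.get(cb.getFilename());
        totals.put(cb.getFilename(), (prev == null? 0L: prev) + cb.getSize());
      }
      return totals;
    }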
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
index 37c579a..cffe905 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCacheStats.java
@@ -34,6 +34,12 @@ public class BucketCacheStats extends CacheStats {
private final static int nanoTime = 1000000;
private long lastLogTime = EnvironmentEdgeManager.currentTimeMillis();
+ @Override
+ public String toString() {
+ return super.toString() + ", ioHitsPerSecond=" + getIOHitsPerSecond() +
+ ", ioTimePerHit=" + getIOTimePerHit();
+ }
+
public void ioHit(long time) {
ioHitCount.incrementAndGet();
ioHitTime.addAndGet(time);
@@ -43,7 +49,7 @@ public class BucketCacheStats extends CacheStats {
long now = EnvironmentEdgeManager.currentTimeMillis();
long took = (now - lastLogTime) / 1000;
lastLogTime = now;
- return ioHitCount.get() / took;
+ return took == 0? 0: ioHitCount.get() / took;
}
public double getIOTimePerHit() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
index 381c5c9..d9494e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SingleSizeCache.java
@@ -19,21 +19,20 @@
package org.apache.hadoop.hbase.io.hfile.slab;
import java.nio.ByteBuffer;
-import java.util.List;
+import java.util.Iterator;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+import org.apache.hadoop.hbase.io.hfile.CachedBlock;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.util.StringUtils;
@@ -317,16 +316,6 @@ public class SingleSizeCache implements BlockCache, HeapSize {
return 0;
}
- /*
- * Not implemented. Extremely costly to do this from the off heap cache, you'd
- * need to copy every object on heap once
- */
- @Override
- public List getBlockCacheColumnFamilySummaries(
- Configuration conf) {
- throw new UnsupportedOperationException();
- }
-
/* Just a pair class, holds a reference to the parent cacheable */
private static class CacheablePair implements HeapSize {
final CacheableDeserializer<Cacheable> deserializer;
@@ -351,4 +340,14 @@ public class SingleSizeCache implements BlockCache, HeapSize {
+ ClassSize.ATOMIC_LONG);
}
}
-}
+
+ @Override
+ public Iterator<CachedBlock> iterator() {
+ return null;
+ }
+
+ @Override
+ public BlockCache[] getBlockCaches() {
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
index 561c6f4..4a264bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/slab/SlabCache.java
@@ -20,7 +20,8 @@
package org.apache.hadoop.hbase.io.hfile.slab;
import java.math.BigDecimal;
-import java.util.List;
+import java.util.Iterator;
+import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
@@ -35,10 +36,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheColumnFamilySummary;
import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+import org.apache.hadoop.hbase.io.hfile.BlockPriority;
+import org.apache.hadoop.hbase.io.hfile.BlockType;
import org.apache.hadoop.hbase.io.hfile.CacheStats;
import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.CachedBlock;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.util.StringUtils;
@@ -57,7 +61,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
@InterfaceAudience.Private
public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
private final ConcurrentHashMap<BlockCacheKey, SingleSizeCache> backingStore;
- private final TreeMap<Integer, SingleSizeCache> sizer;
+ private final TreeMap<Integer, SingleSizeCache> slabs;
static final Log LOG = LogFactory.getLog(SlabCache.class);
static final int STAT_THREAD_PERIOD_SECS = 60 * 5;
@@ -100,10 +104,13 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
this.successfullyCachedStats = new SlabStats();
backingStore = new ConcurrentHashMap<BlockCacheKey, SingleSizeCache>();
- sizer = new TreeMap<Integer, SingleSizeCache>();
+ slabs = new TreeMap<Integer, SingleSizeCache>();
this.scheduleThreadPool.scheduleAtFixedRate(new StatisticsThread(this),
STAT_THREAD_PERIOD_SECS, STAT_THREAD_PERIOD_SECS, TimeUnit.SECONDS);
+ }
+ public Map<Integer, SingleSizeCache> getSizer() {
+ return slabs;
}
/**
@@ -179,7 +186,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
* object is too large, returns null.
*/
Entry<Integer, SingleSizeCache> getHigherBlock(int size) {
- return sizer.higherEntry(size - 1);
+ return slabs.higherEntry(size - 1);
}
private BigDecimal[] stringArrayToBigDecimalArray(String[] parsee) {
@@ -193,7 +200,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
private void addSlab(int blockSize, int numBlocks) {
LOG.info("Creating slab of blockSize " + blockSize + " with " + numBlocks
+ " blocks, " + StringUtils.byteDesc(blockSize * (long) numBlocks) + "bytes.");
- sizer.put(blockSize, new SingleSizeCache(blockSize, numBlocks, this));
+ slabs.put(blockSize, new SingleSizeCache(blockSize, numBlocks, this));
}
/**
@@ -294,7 +301,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
* Also terminates the scheduleThreadPool.
*/
public void shutdown() {
- for (SingleSizeCache s : sizer.values()) {
+ for (SingleSizeCache s : slabs.values()) {
s.shutdown();
}
this.scheduleThreadPool.shutdown();
@@ -302,7 +309,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
public long heapSize() {
long childCacheSize = 0;
- for (SingleSizeCache s : sizer.values()) {
+ for (SingleSizeCache s : slabs.values()) {
childCacheSize += s.heapSize();
}
return SlabCache.CACHE_FIXED_OVERHEAD + childCacheSize;
@@ -314,7 +321,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
public long getFreeSize() {
long childFreeSize = 0;
- for (SingleSizeCache s : sizer.values()) {
+ for (SingleSizeCache s : slabs.values()) {
childFreeSize += s.getFreeSize();
}
return childFreeSize;
@@ -323,7 +330,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
@Override
public long getBlockCount() {
long count = 0;
- for (SingleSizeCache cache : sizer.values()) {
+ for (SingleSizeCache cache : slabs.values()) {
count += cache.getBlockCount();
}
return count;
@@ -352,7 +359,7 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
@Override
public void run() {
- for (SingleSizeCache s : ourcache.sizer.values()) {
+ for (SingleSizeCache s : ourcache.slabs.values()) {
s.logStats();
}
@@ -430,14 +437,75 @@ public class SlabCache implements SlabItemActionWatcher, BlockCache, HeapSize {
return numEvicted;
}
- /*
- * Not implemented. Extremely costly to do this from the off heap cache, you'd
- * need to copy every object on heap once
- */
@Override
- public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
- Configuration conf) {
- throw new UnsupportedOperationException();
+ public Iterator<CachedBlock> iterator() {
+ // Don't bother with ramcache since stuff is in here only a little while.
+ final Iterator<Map.Entry<BlockCacheKey, SingleSizeCache>> i =
+ this.backingStore.entrySet().iterator();
+ return new Iterator<CachedBlock>() {
+ private final long now = System.nanoTime();
+
+ @Override
+ public boolean hasNext() {
+ return i.hasNext();
+ }
+
+ @Override
+ public CachedBlock next() {
+ final Map.Entry<BlockCacheKey, SingleSizeCache> e = i.next();
+ final Cacheable cacheable = e.getValue().getBlock(e.getKey(), false, false, false);
+ return new CachedBlock() {
+ @Override
+ public String toString() {
+ return BlockCacheUtil.toString(this, now);
+ }
+
+ @Override
+ public BlockPriority getBlockPriority() {
+ return null;
+ }
+
+ @Override
+ public BlockType getBlockType() {
+ return cacheable.getBlockType();
+ }
+
+ @Override
+ public long getOffset() {
+ return e.getKey().getOffset();
+ }
+
+ @Override
+ public long getSize() {
+ return cacheable == null? 0: cacheable.getSerializedLength();
+ }
+
+ @Override
+ public long getCachedTime() {
+ return -1;
+ }
+
+ @Override
+ public String getFilename() {
+ return e.getKey().getHfileName();
+ }
+
+ @Override
+ public int compareTo(CachedBlock other) {
+ return (int)(this.getOffset() - other.getOffset());
+ }
+ };
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+ };
}
-}
+ @Override
+ public BlockCache[] getBlockCaches() {
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java
index f991e3b..232f2d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java
@@ -70,4 +70,4 @@ public abstract class StateDumpServlet extends HttpServlet {
status.dumpTo(out, " ");
}
}
-}
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 7c198ad..ba6a516 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2494,10 +2494,10 @@ public class HRegionServer extends HasThread implements
String regionNameStr = regionName == null?
encodedRegionName: Bytes.toStringBinary(regionName);
if (isOpening != null && isOpening.booleanValue()) {
- throw new RegionOpeningException("Region " + regionNameStr +
+ throw new RegionOpeningException("Region " + regionNameStr +
" is opening on " + this.serverName);
}
- throw new NotServingRegionException("Region " + regionNameStr +
+ throw new NotServingRegionException("Region " + regionNameStr +
" is not online on " + this.serverName);
}
return region;
@@ -2800,4 +2800,11 @@ public class HRegionServer extends HasThread implements
}
return result;
}
+
+ /**
+ * @return The cache config instance used by the regionserver.
+ */
+ public CacheConfig getCacheConfig() {
+ return this.cacheConfig;
+ }
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java
index 64ae859..cae705c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSStatusServlet.java
@@ -39,20 +39,24 @@ public class RSStatusServlet extends HttpServlet {
HRegionServer hrs = (HRegionServer)getServletContext().getAttribute(
HRegionServer.REGIONSERVER);
assert hrs != null : "No RS in context!";
-
+
resp.setContentType("text/html");
-
+
if (!hrs.isOnline()) {
resp.getWriter().write("The RegionServer is initializing!");
resp.getWriter().close();
return;
}
-
+
RSStatusTmpl tmpl = new RSStatusTmpl();
if (req.getParameter("format") != null)
tmpl.setFormat(req.getParameter("format"));
if (req.getParameter("filter") != null)
tmpl.setFilter(req.getParameter("filter"));
+ if (req.getParameter("bcn") != null)
+ tmpl.setBcn(req.getParameter("bcn"));
+ if (req.getParameter("bcv") != null)
+ tmpl.setBcv(req.getParameter("bcv"));
tmpl.render(resp.getWriter(), hrs);
}
}
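
Both new parameters flow straight into RSStatusTmpl, so the region server status page can scope its block-cache rendering: presumably bcn names which cache instance to show and bcv which view of its contents, along the lines of /rs-status?format=json&bcn=L1 (illustrative only; the accepted values are defined by the template, which is not part of this hunk).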
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
index be9c750..31fab53 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
@@ -92,9 +92,9 @@
<li><a href="/master-status">Home</a></li>
<li><a href="/tablesDetailed.jsp">Table Details</a></li>
- <li><a href="/logs/">Local logs</a></li>
+ <li><a href="/logs/">Local Logs</a></li>
<li><a href="/logLevel">Log Level</a></li>
- <li><a href="/dump">Debug dump</a></li>
+ <li><a href="/dump">Debug Dump</a></li>
<li><a href="/jmx">Metrics Dump</a></li>
<% if (HBaseConfiguration.isShowConfInServlet()) { %>
<li><a href="/conf">HBase Configuration</a></li>
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 8df53cb..1f579e6 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -94,9 +94,9 @@