diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index e741760..48f73e2 100644
--- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -278,4 +278,10 @@ public class MemcachedBlockCache implements BlockCache {
     // Not doing reference counting. All blocks here are EXCLUSIVE
   }
 
+  @Override
+  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
+      Configuration conf) throws IOException, InterruptedException {
+    throw new UnsupportedOperationException();
+  }
+
 }
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
index 3a68d96..3632bb8 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
@@ -87,6 +87,7 @@ org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
     <li><a href="/jmx">Metrics Dump</a></li>
     <%if HBaseConfiguration.isShowConfInServlet()%>
     <li><a href="/conf">HBase Configuration</a></li>
     </%if>
+    <li><a href="/cachesummary.jsp">Cache Summary</a></li>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
index cef7e02..ecee4b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
@@ -18,8 +18,11 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import java.io.IOException;
 import java.util.Iterator;
+import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 
@@ -129,4 +132,19 @@ public interface BlockCache extends Iterable<CachedBlock> {
    * @param block the hfileblock to be returned
    */
   void returnBlock(BlockCacheKey cacheKey, Cacheable block);
+
+  /**
+   * Performs a BlockCache summary and returns a List of BlockCacheColumnFamilySummary objects.
+   * This method could be fairly heavyweight in that it evaluates the entire HBase file-system
+   * against what is in the RegionServer BlockCache.
+   * <p>
+   * The contract of this interface is to return the List in sorted order by Table name, then
+   * ColumnFamily.
+   *
+   * @param conf HBaseConfiguration
+   * @return List of BlockCacheColumnFamilySummary
+   * @throws java.io.IOException if the file-system scan fails
+   */
+  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf)
+      throws IOException, InterruptedException;
 }
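A minimal usage sketch (illustrative only, not part of the patch). It assumes a Configuration pointing at a live cluster; CacheConfig#getBlockCache is the same lookup the bundled cachesummary.jsp (below) performs, and the sorted order relied on here is the contract stated in the javadoc above:

    BlockCache cache = new CacheConfig(conf).getBlockCache();
    for (BlockCacheColumnFamilySummary s : cache.getBlockCacheColumnFamilySummaries(conf)) {
      // Entries arrive sorted by table name, then column family.
      System.out.println(s.getTable() + "/" + s.getColumnFamily() + ": "
          + s.getBlocks() + " blocks, " + s.getHeapSize() + " heap bytes");
    }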
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java
new file mode 100644
index 0000000..7d832fc
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheColumnFamilySummary.java
@@ -0,0 +1,239 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * BlockCacheColumnFamilySummary represents a summary of the blockCache usage
+ * at Table/ColumnFamily granularity.
+ * <p>
+ * As ColumnFamilies are owned by Tables, a summary by ColumnFamily implies that
+ * the owning Table is included in the summarization.
+ */
+@InterfaceAudience.Private
+public class BlockCacheColumnFamilySummary
+    implements Writable, Comparable<BlockCacheColumnFamilySummary> {
+
+  private String table = "";
+  private String columnFamily = "";
+  private int blocks;
+  private long heapSize;
+
+  /**
+   * Default constructor for Writable
+   */
+  public BlockCacheColumnFamilySummary() {
+  }
+
+  /**
+   * @param table table name
+   * @param columnFamily columnFamily name
+   */
+  public BlockCacheColumnFamilySummary(String table, String columnFamily) {
+    this.table = table;
+    this.columnFamily = columnFamily;
+  }
+
+  /**
+   * @return the table name
+   */
+  public String getTable() {
+    return table;
+  }
+
+  /**
+   * @param table the table that owns the cached block
+   */
+  public void setTable(String table) {
+    this.table = table;
+  }
+
+  /**
+   * @return the columnFamily name
+   */
+  public String getColumnFamily() {
+    return columnFamily;
+  }
+
+  /**
+   * @param columnFamily the columnFamily that owns the cached block
+   */
+  public void setColumnFamily(String columnFamily) {
+    this.columnFamily = columnFamily;
+  }
+
+  /**
+   * @return number of blocks in the cache
+   */
+  public int getBlocks() {
+    return blocks;
+  }
+
+  /**
+   * @param blocks number of blocks in the cache
+   */
+  public void setBlocks(int blocks) {
+    this.blocks = blocks;
+  }
+
+  /**
+   * @return heapSize in the cache
+   */
+  public long getHeapSize() {
+    return heapSize;
+  }
+
+  /**
+   * Increments the number of blocks in the cache for this entry
+   */
+  public void incrementBlocks() {
+    this.blocks++;
+  }
+
+  /**
+   * @param heapSize the heapSize to add to this entry
+   */
+  public void incrementHeapSize(long heapSize) {
+    this.heapSize = this.heapSize + heapSize;
+  }
+
+  /**
+   * @param heapSize total heapSize for the table/CF
+   */
+  public void setHeapSize(long heapSize) {
+    this.heapSize = heapSize;
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    table = in.readUTF();
+    columnFamily = in.readUTF();
+    blocks = in.readInt();
+    heapSize = in.readLong();
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeUTF(table);
+    out.writeUTF(columnFamily);
+    out.writeInt(blocks);
+    out.writeLong(heapSize);
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((columnFamily == null) ? 0 : columnFamily.hashCode());
+    result = prime * result + ((table == null) ? 0 : table.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj) return true;
+    if (obj == null) return false;
+    if (getClass() != obj.getClass()) return false;
+    BlockCacheColumnFamilySummary other = (BlockCacheColumnFamilySummary) obj;
+    if (columnFamily == null) {
+      if (other.columnFamily != null) return false;
+    } else if (!columnFamily.equals(other.columnFamily)) return false;
+    if (table == null) {
+      if (other.table != null) return false;
+    } else if (!table.equals(other.table)) return false;
+    return true;
+  }
+
+  @Override
+  public String toString() {
+    return "BlockCacheColumnFamilySummary [table=" + table + ", columnFamily="
+        + columnFamily + ", blocks=" + blocks + ", heapSize="
+        + StringUtils.byteDesc(heapSize) + "]";
+  }
+
+  /**
+   * Constructs a BlockCacheColumnFamilySummary from a full StoreFile Path.
+   * <p>
+   * The path is expected to be in the format of...
+   * <pre>
+   * hdfs://localhost:51169/rootDir/namespace/table/70236052/info/3944417774205889744
+   * </pre>
+   * ... where:<br/>
+   * 'table' = Table Name<br/>
+   * '70236052' = Region<br/>
+   * 'info' = ColumnFamily<br/>
+   * '3944417774205889744' = StoreFile
+   *
+   * @param path full StoreFile Path
+   * @return BlockCacheColumnFamilySummary
+   */
+  public static BlockCacheColumnFamilySummary createFromStoreFilePath(Path path) {
+    String sp = path.toString();
+    String[] s = sp.split("\\/");
+
+    BlockCacheColumnFamilySummary blockCacheColumnFamilySummary = null;
+    if (s.length >= 4) {
+      // why 4? StoreFile, CF, Region, Table
+      String table = s[s.length - 4]; // 4th from the end
+      String cf = s[s.length - 2];    // 2nd from the end
+      blockCacheColumnFamilySummary = new BlockCacheColumnFamilySummary(table, cf);
+    }
+    return blockCacheColumnFamilySummary;
+  }
+
+  @Override
+  public int compareTo(BlockCacheColumnFamilySummary o) {
+    int i = table.compareTo(o.getTable());
+    if (i != 0) {
+      return i;
+    }
+    return columnFamily.compareTo(o.getColumnFamily());
+  }
+
+  /**
+   * Creates a new BlockCacheColumnFamilySummary keyed like an existing one,
+   * with blocks and heapSize starting at zero.
+   *
+   * @param e BlockCacheColumnFamilySummary to copy the key from
+   * @return new BlockCacheColumnFamilySummary
+   */
+  public static BlockCacheColumnFamilySummary create(BlockCacheColumnFamilySummary e) {
+    BlockCacheColumnFamilySummary e2 = new BlockCacheColumnFamilySummary();
+    e2.setTable(e.getTable());
+    e2.setColumnFamily(e.getColumnFamily());
+    return e2;
+  }
+}
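A worked example of the index arithmetic in createFromStoreFilePath, using the path from its own javadoc (illustrative only):

    Path p = new Path(
        "hdfs://localhost:51169/rootDir/namespace/table/70236052/info/3944417774205889744");
    BlockCacheColumnFamilySummary s = BlockCacheColumnFamilySummary.createFromStoreFilePath(p);
    // The split on '/' ends with [..., "table", "70236052", "info", "3944417774205889744"],
    // so s.getTable() returns "table" (4th from the end) and s.getColumnFamily() returns "info".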
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheSizeDistributionSummary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheSizeDistributionSummary.java
new file mode 100644
index 0000000..536fc9a
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheSizeDistributionSummary.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * BlockCacheSizeDistributionSummary represents a summary of the blockCache
+ * usage at a size granularity: statistics for the blocks whose size falls in
+ * the range [minBlockSize, maxBlockSize).
+ */
+@InterfaceAudience.Private
+public class BlockCacheSizeDistributionSummary {
+
+  private final int minBlockSize;
+  private final int maxBlockSize;
+
+  private int blocks;
+  private long heapSize;
+
+  public BlockCacheSizeDistributionSummary(int minBlockSize, int maxBlockSize) {
+    this.minBlockSize = minBlockSize;
+    this.maxBlockSize = maxBlockSize;
+  }
+
+  /**
+   * Increments the number of blocks in the cache for this entry
+   */
+  public void incrementBlocks() {
+    this.blocks++;
+  }
+
+  /**
+   * @param heapSize the heapSize to add to this entry
+   */
+  public void incrementHeapSize(long heapSize) {
+    this.heapSize = this.heapSize + heapSize;
+  }
+
+  /**
+   * @return heapSize in the cache
+   */
+  public long getHeapSize() {
+    return heapSize;
+  }
+
+  /**
+   * @return number of blocks in the cache
+   */
+  public int getBlocks() {
+    return blocks;
+  }
+
+  @Override
+  public String toString() {
+    return "BlockCacheSizeDistributionSummary ["
+        + StringUtils.byteDesc(minBlockSize) + "<=blocksize<"
+        + StringUtils.byteDesc(maxBlockSize) + ", blocks=" + blocks
+        + ", heapSize=" + StringUtils.byteDesc(heapSize) + "]";
+  }
+
+}
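Both cache implementations further below key these summaries by rounding the serialized block length down to a multiple of the granularity. A worked example with the 4 KB default (values hypothetical):

    int granularitySize = 4 * 1024;   // 4096, the default used by the no-arg overloads
    long blockSerializedLen = 13500L; // a hypothetical ~13 KB block
    int roundDownSize = (int) (blockSerializedLen / granularitySize * granularitySize); // 12288
    int roundUpSize = roundDownSize + granularitySize;                                  // 16384
    // The block is tallied in the "12 KB <= blocksize < 16 KB" summary.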
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index 4ceda39..5af1dac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -18,8 +18,11 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import java.io.IOException;
 import java.util.Iterator;
+import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory;
@@ -362,6 +365,12 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
     this.l2Cache.returnBlock(cacheKey, block);
   }
 
+  @Override
+  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
+      Configuration conf) throws IOException, InterruptedException {
+    throw new UnsupportedOperationException();
+  }
+
   @VisibleForTesting
   public int getRefCount(BlockCacheKey cacheKey) {
     return ((BucketCache) this.l2Cache).getRefCount(cacheKey);
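The combined cache deliberately stays unsupported in this patch; the cachesummary.jsp below instead unwraps its two tiers and queries them individually. Were a merged implementation wanted later, one possible shape (purely hypothetical, with lruCache and l2Cache naming the two tiers as in the hunk above) would delegate and concatenate:

    // Hypothetical follow-up, NOT what this patch does:
    List<BlockCacheColumnFamilySummary> merged = new ArrayList<>();
    merged.addAll(lruCache.getBlockCacheColumnFamilySummaries(conf)); // L1, on-heap
    merged.addAll(l2Cache.getBlockCacheColumnFamilySummaries(conf));  // L2, e.g. BucketCache
    Collections.sort(merged);
    // Duplicate table/CF keys from the two tiers would still need to be folded together.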
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
index 3cced66..474b874 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
@@ -18,15 +18,10 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import java.io.IOException;
 import java.lang.ref.WeakReference;
 import java.nio.ByteBuffer;
-import java.util.EnumMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.PriorityQueue;
-import java.util.SortedSet;
-import java.util.TreeSet;
+import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
@@ -37,12 +32,15 @@ import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
@@ -1155,7 +1153,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
     return map;
   }
 
-  BlockCache getVictimHandler() {
+  public BlockCache getVictimHandler() {
     return this.victimHandler;
   }
 
@@ -1175,4 +1173,70 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize {
       this.victimHandler.returnBlock(cacheKey, block);
     }
   }
+
+  /**
+   * Gets the distribution of block sizes in this cache.
+   * @param granularitySize the width of each size bucket, in bytes
+   * @return a list of BlockCacheSizeDistributionSummary, one per non-empty bucket
+   */
+  public List<BlockCacheSizeDistributionSummary> getBlockSizeDistribution(
+      int granularitySize) {
+    Map<Integer, BlockCacheSizeDistributionSummary> summaryMap = new TreeMap<>();
+    for (LruCachedBlock cb : map.values()) {
+      long blockSerializedLen = cb.getBuffer().getSerializedLength();
+      int roundDownSize = (int) (blockSerializedLen / granularitySize * granularitySize);
+      int roundUpSize = roundDownSize + granularitySize;
+      BlockCacheSizeDistributionSummary distributionSummary = summaryMap.get(roundDownSize);
+      if (distributionSummary == null) {
+        distributionSummary = new BlockCacheSizeDistributionSummary(roundDownSize, roundUpSize);
+        summaryMap.put(roundDownSize, distributionSummary);
+      }
+      distributionSummary.incrementBlocks();
+      distributionSummary.incrementHeapSize(cb.heapSize());
+    }
+    List<BlockCacheSizeDistributionSummary> list = new ArrayList<>(summaryMap.values());
+    return list;
+  }
+
+  public List<BlockCacheSizeDistributionSummary> getBlockSizeDistribution() {
+    return getBlockSizeDistribution(4 * 1024);
+  }
+
+  @Override
+  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(Configuration conf)
+      throws IOException, InterruptedException {
+    Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
+        FileSystem.get(conf), FSUtils.getRootDir(conf));
+    Map<String, BlockCacheColumnFamilySummary> fileToCfMap = new HashMap<>();
+    for (Map.Entry<String, Path> entry : sfMap.entrySet()) {
+      fileToCfMap.put(entry.getKey(),
+          BlockCacheColumnFamilySummary.createFromStoreFilePath(entry.getValue()));
+    }
+    return getBlockCacheColumnFamilySummaries(fileToCfMap);
+  }
+
+  private List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
+      Map<String, BlockCacheColumnFamilySummary> fileToCfMap) throws IOException {
+    // Quirky, but the summary doubles as a compound key (table, columnFamily); this is a
+    // shortcut taken instead of creating a class that would represent only the key.
+    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs = new HashMap<>();
+
+    for (LruCachedBlock cb : map.values()) {
+      String sf = cb.getCacheKey().getHfileName();
+      BlockCacheColumnFamilySummary lookup = fileToCfMap.get(sf);
+      if (lookup != null) {
+        BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
+        if (bcse == null) {
+          bcse = BlockCacheColumnFamilySummary.create(lookup);
+          bcs.put(lookup, bcse);
+        }
+        bcse.incrementBlocks();
+        bcse.incrementHeapSize(cb.heapSize());
+      }
+    }
+    List<BlockCacheColumnFamilySummary> list = new ArrayList<>(bcs.values());
+    Collections.sort(list);
+    return list;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index d9ee64c..79dd5ae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -196,6 +196,10 @@ public final class BucketAllocator {
       completelyFreeBuckets.put(b, b);
     }
 
+    public int blockSize() {
+      return bucketSizes[sizeIndex];
+    }
+
    public int sizeIndex() {
      return sizeIndex;
    }
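BucketCache stores blocks in fixed-size bucket slots, so a block's payload length understates the space it actually occupies; the new blockSize() accessor exposes the slot size. A minimal sketch of how the next hunk uses it (bucketEntry stands in for any cached entry; roundUpToBucketSizeInfo already exists in BucketAllocator):

    int len = bucketEntry.getLength();                                        // serialized payload
    int usedSize = bucketAllocator.roundUpToBucketSizeInfo(len).blockSize();  // whole slot size
    int slack = usedSize - len;  // allocator overhead for this block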
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 3c27f14..3d7aa39 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -29,15 +29,7 @@ import java.io.ObjectInputStream;
 import java.io.ObjectOutputStream;
 import java.io.Serializable;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.PriorityQueue;
-import java.util.Set;
+import java.util.*;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
@@ -54,24 +46,15 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-import org.apache.hadoop.hbase.io.hfile.BlockPriority;
-import org.apache.hadoop.hbase.io.hfile.BlockType;
-import org.apache.hadoop.hbase.io.hfile.CacheStats;
-import org.apache.hadoop.hbase.io.hfile.Cacheable;
+import org.apache.hadoop.hbase.io.hfile.*;
 import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-import org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-import org.apache.hadoop.hbase.io.hfile.CachedBlock;
-import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.nio.ByteBuff;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.HasThread;
-import org.apache.hadoop.hbase.util.IdReadWriteLock;
+import org.apache.hadoop.hbase.util.*;
 import org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
 import org.apache.hadoop.util.StringUtils;
@@ -1400,6 +1383,13 @@ public class BucketCache implements BlockCache, HeapSize {
       realCacheSize.addAndGet(len);
       return bucketEntry;
     }
+
+    public long heapSize() {
+      return key.heapSize() + data.heapSize()
+          + ClassSize.align(ClassSize.OBJECT + Bytes.SIZEOF_LONG
+              + Bytes.SIZEOF_BOOLEAN + Bytes.SIZEOF_BYTE);
+    }
+
   }
 
   /**
@@ -1531,4 +1521,99 @@ public class BucketCache implements BlockCache, HeapSize {
     }
     return 0;
   }
+
+  /**
+   * Gets the distribution of block sizes in the cache.
+   * @param granularitySize the width of each size bucket, in bytes
+   * @return a list of BlockCacheSizeDistributionSummary, one per non-empty bucket
+   */
+  public List<BlockCacheSizeDistributionSummary> getBlockSizeDistribution(
+      int granularitySize) {
+    Map<Integer, BlockCacheSizeDistributionSummary> summaryMap = new TreeMap<>();
+    for (RAMQueueEntry ramEntry : ramCache.values()) {
+      long blockSerializedLen = ramEntry.getData().getSerializedLength();
+      addBlockSizeDistribution(summaryMap, granularitySize, blockSerializedLen,
+          ramEntry.getData().heapSize());
+    }
+    for (BucketEntry bucketEntry : backingMap.values()) {
+      long blockUsedSize = bucketAllocator.roundUpToBucketSizeInfo(
+          bucketEntry.getLength()).blockSize();
+      addBlockSizeDistribution(summaryMap, granularitySize,
+          bucketEntry.getLength(), blockUsedSize);
+    }
+    List<BlockCacheSizeDistributionSummary> list = new ArrayList<>(summaryMap.values());
+    return list;
+  }
+
+  public List<BlockCacheSizeDistributionSummary> getBlockSizeDistribution() {
+    return getBlockSizeDistribution(4 * 1024);
+  }
+
+  private void addBlockSizeDistribution(
+      Map<Integer, BlockCacheSizeDistributionSummary> summaryMap,
+      int granularitySize, long blockSerializedLen, long blockHeapSize) {
+    int roundDownSize = (int) (blockSerializedLen / granularitySize * granularitySize);
+    int roundUpSize = roundDownSize + granularitySize;
+    BlockCacheSizeDistributionSummary distributionSummary = summaryMap.get(roundDownSize);
+    if (distributionSummary == null) {
+      distributionSummary = new BlockCacheSizeDistributionSummary(roundDownSize, roundUpSize);
+      summaryMap.put(roundDownSize, distributionSummary);
+    }
+    distributionSummary.incrementBlocks();
+    distributionSummary.incrementHeapSize(blockHeapSize);
+  }
+
+  @Override
+  public List<BlockCacheColumnFamilySummary> getBlockCacheColumnFamilySummaries(
+      Configuration conf) throws IOException, InterruptedException {
+    Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(
+        FileSystem.get(conf), FSUtils.getRootDir(conf));
+    Map<String, BlockCacheColumnFamilySummary> fileToCfMap = new HashMap<>();
+    for (Map.Entry<String, Path> entry : sfMap.entrySet()) {
+      fileToCfMap.put(entry.getKey(),
+          BlockCacheColumnFamilySummary.createFromStoreFilePath(entry.getValue()));
+    }
+    // Quirky, but the summary doubles as a compound key (table, columnFamily); this is a
+    // shortcut taken instead of creating a class that would represent only the key.
+    Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs = new HashMap<>();
+    for (Map.Entry<BlockCacheKey, RAMQueueEntry> ramEntry : ramCache.entrySet()) {
+      addBlockCacheColumnFamilySummaries(fileToCfMap, ramEntry.getKey(), bcs,
+          ramEntry.getValue().heapSize());
+    }
+    for (Map.Entry<BlockCacheKey, BucketEntry> bucketEntry : backingMap.entrySet()) {
+      long blockUsedSize = bucketAllocator.roundUpToBucketSizeInfo(
+          bucketEntry.getValue().getLength()).blockSize();
+      addBlockCacheColumnFamilySummaries(fileToCfMap, bucketEntry.getKey(), bcs, blockUsedSize);
+    }
+    List<BlockCacheColumnFamilySummary> list = new ArrayList<>(bcs.values());
+    Collections.sort(list);
+    return list;
+  }
+
+  private void addBlockCacheColumnFamilySummaries(
+      Map<String, BlockCacheColumnFamilySummary> fileToCfMap,
+      BlockCacheKey cacheKey,
+      Map<BlockCacheColumnFamilySummary, BlockCacheColumnFamilySummary> bcs,
+      long blockSize) {
+    String sf = cacheKey.getHfileName();
+    BlockCacheColumnFamilySummary lookup = fileToCfMap.get(sf);
+    if (lookup != null) {
+      BlockCacheColumnFamilySummary bcse = bcs.get(lookup);
+      if (bcse == null) {
+        bcse = BlockCacheColumnFamilySummary.create(lookup);
+        bcs.put(lookup, bcse);
+      }
+      bcse.incrementBlocks();
+      bcse.incrementHeapSize(blockSize);
+    }
+  }
 }
    "); + LruBlockCache lruCache = null; + BlockCache bucketCache = null; + if(blockCache instanceof LruBlockCache){ + lruCache = (LruBlockCache)blockCache; + bucketCache = lruCache.getVictimHandler(); + }else if(blockCache instanceof CombinedBlockCache){ + lruCache = (LruBlockCache)((CombinedBlockCache)blockCache).getBlockCaches()[0]; + bucketCache = (BlockCache)((CombinedBlockCache)blockCache).getBlockCaches()[1]; + } + for(BlockCache cache : new BlockCache[]{lruCache,bucketCache}){ + if(cache != null){ + out.println("

    "+cache.getClass().getSimpleName()+"

    "); + out.println("Total size:"+StringUtils.byteDesc(cache.getCurrentSize()+cache.getFreeSize())+"

    "); + out.println("Current size:"+StringUtils.byteDesc(cache.getCurrentSize())+"

    "); + //BlockCache does not discern between DATA and META blocks so we do not show MetaBlock counts + //out.println("MetaBlock size:"+StringUtils.byteDesc(cache.getMetaSize())+"

    "); + out.println("Free size:"+StringUtils.byteDesc(cache.getFreeSize())+"

    "); + out.println("Block count:"+(cache.getBlockCount())+"

    "); + out.println("Size distribution summary:

    "); + List sizeSummary; + if(cache==lruCache){ + if(unitSizeStr!=null){ + sizeSummary=((LruBlockCache)cache).getBlockSizeDistribution(Integer.parseInt(unitSizeStr)*1024); + }else{ + sizeSummary=((LruBlockCache)cache).getBlockSizeDistribution(); + } + }else{ + if(unitSizeStr!=null){ + sizeSummary=((BucketCache)cache).getBlockSizeDistribution(Integer.parseInt(unitSizeStr)*1024); + }else{ + sizeSummary=((BucketCache)cache).getBlockSizeDistribution(); + } + } + + for(BlockCacheSizeDistributionSummary bcds:sizeSummary){ + out.println(bcds+"

    "); + } + if("true".equals(tableDisStr)){ + out.println("Table/Family summary:

    "); + List familySummay=cache.getBlockCacheColumnFamilySummaries(conf); + for(BlockCacheColumnFamilySummary bcfs:familySummay){ + out.println(bcfs+"

    "); + } + } + + out.println("

    "); + } + } + + }catch(Throwable t){ + out.println(t.toString()+"

    "); + } + +%> \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java index e1a8d5f..eb8cfc9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java @@ -23,8 +23,10 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.Iterator; +import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ChoreService; @@ -36,12 +38,7 @@ import org.apache.hadoop.hbase.Server; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.Waiter; import org.apache.hadoop.hbase.client.ClusterConnection; -import org.apache.hadoop.hbase.io.hfile.BlockCache; -import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; -import org.apache.hadoop.hbase.io.hfile.CacheStats; -import org.apache.hadoop.hbase.io.hfile.Cacheable; -import org.apache.hadoop.hbase.io.hfile.CachedBlock; -import org.apache.hadoop.hbase.io.hfile.ResizableBlockCache; +import org.apache.hadoop.hbase.io.hfile.*; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext; import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult; @@ -771,6 +768,12 @@ public class TestHeapMemoryManager { public void returnBlock(BlockCacheKey cacheKey, Cacheable buf) { } + @Override + public List getBlockCacheColumnFamilySummaries( + Configuration conf) throws IOException, InterruptedException { + return null; + } + public void setTestBlockSize(long testBlockSize) { this.testBlockSize = testBlockSize; }