Index: src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheSummaryEntry.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheSummaryEntry.java (revision 0)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheSummaryEntry.java (revision 0)
@@ -0,0 +1,197 @@
+/**
+ * Copyright 2011 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.io.hfile;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Represents a summary of the block cache, aggregated by table and column family.
+ *
+ */
+public class BlockCacheSummaryEntry implements Writable {
+
+  private String table;
+  private String columnFamily;
+  private int blocks;
+  private long heapSize;
+
+
+
+  /**
+   *
+   * @return table
+   */
+  public String getTable() {
+    return table;
+  }
+  /**
+   *
+   * @param table (table that owns the cached block)
+   */
+  public void setTable(String table) {
+    this.table = table;
+  }
+  /**
+   *
+   * @return columnFamily
+   */
+  public String getColumnFamily() {
+    return columnFamily;
+  }
+  /**
+   *
+   * @param columnFamily (columnFamily that owns the cached block)
+   */
+  public void setColumnFamily(String columnFamily) {
+    this.columnFamily = columnFamily;
+  }
+
+  /**
+   *
+   * @return blocks
+   */
+  public int getBlocks() {
+    return blocks;
+  }
+  /**
+   *
+   * @param blocks in the cache
+   */
+  public void setBlocks(int blocks) {
+    this.blocks = blocks;
+  }
+
+  /**
+   *
+   * @return heapSize
+   */
+  public long getHeapSize() {
+    return heapSize;
+  }
+
+  /**
+   * Increments the block count by one.
+   */
+  public void incrementBlocks() {
+    this.blocks++;
+  }
+
+  /**
+   *
+   * @param heapSize to increment
+   */
+  public void incrementHeapSize(long heapSize) {
+    this.heapSize = this.heapSize + heapSize;
+  }
+
+  /**
+   *
+   * @param heapSize (total heapSize for the table/CF)
+   */
+  public void setHeapSize(long heapSize) {
+    this.heapSize = heapSize;
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    table = in.readUTF();
+    columnFamily = in.readUTF();
+    blocks = in.readInt();
+    heapSize = in.readLong();
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    out.writeUTF(table);
+    out.writeUTF(columnFamily);
+    out.writeInt(blocks);
+    out.writeLong(heapSize);
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 31;
+    int result = 1;
+    result = prime * result
+        + ((columnFamily == null) ? 0 : columnFamily.hashCode());
+    result = prime * result + ((table == null) ? 0 : table.hashCode());
+    return result;
+  }
+  @Override
+  public boolean equals(Object obj) {
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    BlockCacheSummaryEntry other = (BlockCacheSummaryEntry) obj;
+    if (columnFamily == null) {
+      if (other.columnFamily != null)
+        return false;
+    } else if (!columnFamily.equals(other.columnFamily))
+      return false;
+    if (table == null) {
+      if (other.table != null)
+        return false;
+    } else if (!table.equals(other.table))
+      return false;
+    return true;
+  }
+
+
+
+  @Override
+  public String toString() {
+    return "BlockCacheSummaryEntry [table=" + table + ", columnFamily="
+        + columnFamily + ", blocks=" + blocks + ", heapSize=" + heapSize + "]";
+  }
+
+  /**
+   * Construct a BlockCacheSummaryEntry from a full StoreFile Path
+   *
+   * @param path (full StoreFile Path)
+   * @return BlockCacheSummaryEntry
+   */
+  public static BlockCacheSummaryEntry createFromStoreFilePath(Path path) {
+
+    // The full path will look something like this...
+    // we will process from the end backwards...
+    // hdfs://localhost:51169/user/doug.meil/-ROOT-/70236052/info/3944417774205889744
+
+    String sp = path.toString();
+    String s[] = sp.split("\\/");
+
+    BlockCacheSummaryEntry bcse = null;
+    if ( s.length >= 4) {
+      // why 4?  StoreFile, CF, Region, Table
+      bcse = new BlockCacheSummaryEntry();
+      bcse.setTable( s[ s.length - 4] );        // 4th from the end
+      bcse.setColumnFamily( s[ s.length - 2] ); // 2nd from the end
+    }
+    return bcse;
+  }
+
+}
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java (revision 1156377)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/SimpleBlockCache.java (working copy)
@@ -21,10 +21,11 @@
 import java.lang.ref.ReferenceQueue;
 import java.lang.ref.SoftReference;
-import java.nio.ByteBuffer;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
 
@@ -123,5 +124,11 @@
 
   public int evictBlocksByPrefix(String string) {
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public List<BlockCacheSummaryEntry> getBlockCacheSummary(Configuration conf) {
+    throw new UnsupportedOperationException();
+  }
+
 }
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (revision 1156377)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java (working copy)
@@ -19,21 +19,31 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import java.io.IOException;
 import java.lang.ref.WeakReference;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 import java.util.PriorityQueue;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -725,6 +735,44 @@
       (concurrency * ClassSize.CONCURRENT_HASHMAP_SEGMENT);
   }
+
+  @Override
+  public List<BlockCacheSummaryEntry> getBlockCacheSummary(Configuration conf) throws IOException {
+
+    Map<String, Path> sfMap = FSUtils.getTableStoreFilePathMap(FileSystem.get(conf), FSUtils.getRootDir(conf));
+
+    LOG.info("blockCacheSummary (sfMap): " + sfMap);
+
+    // quirky, but it's a compound key and this is a shortcut taken instead of creating a class that
+    // would represent only a key.
+    Map<BlockCacheSummaryEntry, BlockCacheSummaryEntry> bcs = new HashMap<BlockCacheSummaryEntry, BlockCacheSummaryEntry>();
+
+    final String pattern = "\\" + HFile.CACHE_KEY_SEPARATOR;
+
+    for (CachedBlock cb : map.values()) {
+      // split the block cache key and take the first part, e.g. "8351478435190657655_0" (the StoreFile name)
+      String s[] = cb.getName().split( pattern );
+      if ( s.length > 0) {
+        String sf = s[0];
+        Path path = sfMap.get(sf);
+        if ( path != null ) {
+          BlockCacheSummaryEntry lookup = BlockCacheSummaryEntry.createFromStoreFilePath( path );
+
+          BlockCacheSummaryEntry bcse = bcs.get( lookup );
+          if (bcse == null) {
+            bcse = lookup;
+            bcs.put(lookup, bcse);
+          }
+          bcse.incrementBlocks();
+          bcse.incrementHeapSize( cb.heapSize() );
+        }
+      }
+    }
+
+    List<BlockCacheSummaryEntry> list = new ArrayList<BlockCacheSummaryEntry>();
+    list.addAll( bcs.values() );
+    return list;
+  }
 
   // Simple calculators of sizes given factors and maxSize
 
   private long acceptableSize() {
Index: src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java (revision 1156377)
+++ src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCache.java (working copy)
@@ -19,6 +19,10 @@
  */
 package org.apache.hadoop.hbase.io.hfile;
 
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.hfile.LruBlockCache.CacheStats;
 
@@ -90,4 +94,6 @@
   public long getCurrentSize();
 
   public long getEvictedCount();
+
+  public List<BlockCacheSummaryEntry> getBlockCacheSummary(Configuration conf) throws IOException;
 }
\ No newline at end of file
Index: src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
===================================================================
--- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (revision 1156377)
+++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java (working copy)
@@ -1016,4 +1016,58 @@
       out.close();
     }
   }
+
+  /**
+   * Runs through the HBase rootdir and creates a reverse lookup map for table StoreFile names
+   * to the full path.
+   *
+   * @param fs  The file system to use.
+   * @param hbaseRootDir  The root directory to scan.
+   * @return Map keyed by StoreFile name with a value of the full Path.
+   * @throws IOException When scanning the directory fails.
+   */
+  public static Map<String, Path> getTableStoreFilePathMap(
+      final FileSystem fs, final Path hbaseRootDir)
+      throws IOException {
+    Map<String, Path> map = new HashMap<String, Path>();
+
+    // if this method looks similar to 'getTableFragmentation' that is because it was
+    // borrowed from it.
+
+    DirFilter df = new DirFilter(fs);
+    // presumes any directory under hbase.rootdir is a table
+    FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df);
+    for (FileStatus tableDir : tableDirs) {
+      // Skip the .log directory.  All others should be tables.  Inside a table,
+      // there are compaction.dir directories to skip.  Otherwise, all else
+      // should be regions.  Then in each region, should only be family
+      // directories.  Under each of these are the StoreFiles.
+      Path d = tableDir.getPath();
+      if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) {
+        continue;
+      }
+      FileStatus[] regionDirs = fs.listStatus(d, df);
+      for (FileStatus regionDir : regionDirs) {
+        Path dd = regionDir.getPath();
+        if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) {
+          continue;
+        }
+        // else it's a region name, now look in region for families
+        FileStatus[] familyDirs = fs.listStatus(dd, df);
+        for (FileStatus familyDir : familyDirs) {
+          Path family = familyDir.getPath();
+          // now in family, iterate over the StoreFiles and
+          // put in map
+          FileStatus[] familyStatus = fs.listStatus(family);
+          for (FileStatus sfStatus : familyStatus) {
+            Path sf = sfStatus.getPath();
+            map.put( sf.getName(), sf);
+          }
+
+        }
+      }
+    }
+    return map;
+  }
+
 }
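
Reviewer note: the sketch below is not part of the patch. It is a minimal illustration of how a caller might consume the new BlockCache.getBlockCacheSummary(Configuration) API added above; the class name BlockCacheSummaryDump and the way the BlockCache reference and Configuration are obtained are assumptions for illustration only. With this patch, LruBlockCache returns the aggregated entries, while SimpleBlockCache throws UnsupportedOperationException.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.BlockCacheSummaryEntry;

/**
 * Hypothetical helper (not in the patch): prints one line per table/column family
 * that currently has blocks in the cache.
 */
public class BlockCacheSummaryDump {

  public static void dump(BlockCache cache, Configuration conf) throws IOException {
    // Entries are aggregated by (table, columnFamily); blocks and heapSize are totals.
    List<BlockCacheSummaryEntry> summary = cache.getBlockCacheSummary(conf);
    for (BlockCacheSummaryEntry entry : summary) {
      System.out.println(entry.getTable() + "/" + entry.getColumnFamily()
          + ": blocks=" + entry.getBlocks()
          + ", heapSize=" + entry.getHeapSize());
    }
  }
}

Because BlockCacheSummaryEntry's equals/hashCode use only table and columnFamily, the summary list contains at most one entry per table/column family pair, which is what makes the lookup-and-increment aggregation in LruBlockCache.getBlockCacheSummary work.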