diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java index 69d8521..964f3d6 100644 --- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java +++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.util.Addressing; import org.apache.htrace.Trace; import org.apache.htrace.TraceScope; - import java.io.IOException; import java.net.InetSocketAddress; import java.nio.ByteBuffer; @@ -150,8 +149,10 @@ public class MemcachedBlockCache implements BlockCache { if (updateCacheMetrics) { if (result == null) { cacheStats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + cacheStats.incrementMissForTable(cacheKey.getTableName()); } else { cacheStats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + cacheStats.incrementHitForTable(cacheKey.getTableName()); } } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java index 7d1f5d0..02369ed 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java @@ -36,6 +36,16 @@ public interface MetricsTableSource extends Comparable<MetricsTableSource> { String STORE_FILE_SIZE_DESC = "The size of store files size"; String TABLE_SIZE = "tableSize"; String TABLE_SIZE_DESC = "Total size of the table in the region server"; + String BLOCK_CACHE_COUNT = "blockCacheCount"; + String BLOCK_CACHE_COUNT_DESC = "Number of blocks in the block cache."; + String BLOCK_CACHE_SIZE = "blockCacheSize"; + String BLOCK_CACHE_SIZE_DESC = "Size of the block cache."; + String BLOCK_CACHE_HIT_COUNT = "blockCacheHitCount"; + String BLOCK_CACHE_HIT_COUNT_DESC = "Count of the table hits on the block cache."; + String BLOCK_CACHE_MISS_COUNT = "blockCacheMissCount"; + String BLOCK_CACHE_MISS_COUNT_DESC = "Count of the table misses on the block cache."; + String BLOCK_CACHE_HIT_PERCENT = "blockCacheHitPercent"; + String BLOCK_CACHE_HIT_PERCENT_DESC = "Hit percent of the block cache on the table."; String getTableName(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java index 85ea4f6..2dc593c 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java @@ -54,4 +54,14 @@ public interface MetricsTableWrapperAggregate { * Get the table region size against this table */ long getTableSize(String table); + + long getBlockCacheCount(String table); + + long getBlockCacheSize(String table); + + long getBlockCacheHitCount(String table); + + long getBlockCacheMissCount(String table); + + double getBlockCacheHitPercent(String table); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java index 5d1dd79..2cded33 100644 ---
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java @@ -140,6 +140,21 @@ public class MetricsTableSourceImpl implements MetricsTableSource { mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, MetricsTableSource.TABLE_SIZE_DESC), tableWrapperAgg.getTableSize(tableName.getNameAsString())); + mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.BLOCK_CACHE_COUNT, + MetricsTableSource.BLOCK_CACHE_COUNT_DESC), + tableWrapperAgg.getBlockCacheCount(tableName.getNameAsString())); + mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.BLOCK_CACHE_SIZE, + MetricsTableSource.BLOCK_CACHE_SIZE_DESC), + tableWrapperAgg.getBlockCacheSize(tableName.getNameAsString())); + mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.BLOCK_CACHE_HIT_COUNT, + MetricsTableSource.BLOCK_CACHE_HIT_COUNT_DESC), + tableWrapperAgg.getBlockCacheHitCount(tableName.getNameAsString())); + mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.BLOCK_CACHE_MISS_COUNT, + MetricsTableSource.BLOCK_CACHE_MISS_COUNT_DESC), + tableWrapperAgg.getBlockCacheMissCount(tableName.getNameAsString())); + mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.BLOCK_CACHE_HIT_PERCENT, + MetricsTableSource.BLOCK_CACHE_HIT_PERCENT_DESC), + tableWrapperAgg.getBlockCacheHitPercent(tableName.getNameAsString())); } } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java index 2717817..bb327d6 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java @@ -106,6 +106,31 @@ public class TestMetricsTableSourceImpl { return 3000; } + @Override + public long getBlockCacheSize(String table) { + return 5000; + } + + @Override + public long getBlockCacheCount(String table) { + return 100; + } + + @Override + public long getBlockCacheHitCount(String table) { + return 800; + } + + @Override + public long getBlockCacheMissCount(String table) { + return 200; + } + + @Override + public double getBlockCacheHitPercent(String table) { + return 80.0; + } + public String getTableName() { return tableName; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java index a4a281e..e07e412 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/HalfStoreFileReader.java @@ -23,13 +23,14 @@ import java.nio.ByteBuffer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFileScanner; @@ -63,6 +64,8 @@ public class HalfStoreFileReader extends StoreFileReader { private boolean firstKeySeeked = false; + private TableName tableName; + /** * Creates a half file reader for a normal hfile. * @param fs fileystem to read from @@ -73,9 +76,9 @@ public class HalfStoreFileReader extends StoreFileReader { * @throws IOException */ public HalfStoreFileReader(final FileSystem fs, final Path p, - final CacheConfig cacheConf, final Reference r, final Configuration conf) - throws IOException { - super(fs, p, cacheConf, conf); + final CacheConfig cacheConf, final Reference r, final Configuration conf, + final TableName tableName) throws IOException { + super(fs, p, cacheConf, conf, tableName); // This is not actual midkey for this half-file; its just border // around which we split top and bottom. Have to look in files to find // actual last and first keys for bottom and top halves. Half-files don't @@ -85,6 +88,7 @@ public class HalfStoreFileReader extends StoreFileReader { this.splitCell = new KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length); // Is it top or bottom half? this.top = Reference.isTopFileRegion(r.getFileRegion()); + this.tableName = tableName; } /** @@ -99,9 +103,9 @@ public class HalfStoreFileReader extends StoreFileReader { * @throws IOException */ public HalfStoreFileReader(final FileSystem fs, final Path p, final FSDataInputStreamWrapper in, - long size, final CacheConfig cacheConf, final Reference r, final Configuration conf) - throws IOException { - super(fs, p, in, size, cacheConf, conf); + long size, final CacheConfig cacheConf, final Reference r, final Configuration conf, + final TableName tableName) throws IOException { + super(fs, p, in, size, cacheConf, conf, tableName); // This is not actual midkey for this half-file; its just border // around which we split top and bottom. Have to look in files to find // actual last and first keys for bottom and top halves. Half-files don't @@ -111,6 +115,7 @@ public class HalfStoreFileReader extends StoreFileReader { this.splitCell = new KeyValue.KeyOnlyKeyValue(this.splitkey, 0, this.splitkey.length); // Is it top or bottom half? 
this.top = Reference.isTopFileRegion(r.getFileRegion()); + this.tableName = tableName; } protected boolean isTop() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java index 64405de..fc78908 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheKey.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase.io.hfile; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.util.Bytes; @@ -32,6 +33,7 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { private final long offset; private final BlockType blockType; private final boolean isPrimaryReplicaBlock; + private final TableName tableName; /** * Construct a new BlockCacheKey @@ -39,14 +41,16 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { * @param offset Offset of the block into the file */ public BlockCacheKey(String hfileName, long offset) { - this(hfileName, offset, true, BlockType.DATA); + this(hfileName, offset, true, BlockType.DATA, null); } - public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica, BlockType blockType) { + public BlockCacheKey(String hfileName, long offset, boolean isPrimaryReplica, BlockType blockType, + TableName tableName) { this.isPrimaryReplicaBlock = isPrimaryReplica; this.hfileName = hfileName; this.offset = offset; this.blockType = blockType; + this.tableName = tableName; } @Override @@ -76,7 +80,8 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { Bytes.SIZEOF_BOOLEAN + ClassSize.REFERENCE + // this.hfileName ClassSize.REFERENCE + // this.blockType - Bytes.SIZEOF_LONG); // this.offset + Bytes.SIZEOF_LONG + // this.offset + ClassSize.REFERENCE); // this.tableName /** * Strings have two bytes per character due to default Java Unicode encoding @@ -107,4 +112,8 @@ public class BlockCacheKey implements HeapSize, java.io.Serializable { public BlockType getBlockType() { return blockType; } + + public TableName getTableName() { + return tableName; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java index 8de2a03..70545f9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheStats.java @@ -18,10 +18,12 @@ */ package org.apache.hadoop.hbase.io.hfile; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; - import org.apache.hadoop.hbase.util.Counter; import org.apache.hadoop.hbase.util.FastLongHistogram; @@ -122,6 +124,7 @@ public class CacheStats { */ private FastLongHistogram ageAtEviction; private long startTime = System.nanoTime(); + private Map tableCacheMetricsMap = new ConcurrentHashMap<>(); public CacheStats(final String name) { this(name, DEFAULT_WINDOW_PERIODS); @@ -468,4 +471,141 @@ public class CacheStats { } return zeros; } + + public static class MetricsTableCacheValues { + private Counter blockCacheHitCount; + private Counter blockCacheMissCount; + private Counter 
blockCacheSize; + private Counter blockCacheCount; + + public MetricsTableCacheValues() { + blockCacheHitCount = new Counter(0); + blockCacheMissCount = new Counter(0); + blockCacheSize = new Counter(0); + blockCacheCount = new Counter(0); + } + + public long getBlockCacheHitCount() { + return blockCacheHitCount.get(); + } + + public void incrementBlockCacheHitCount() { + this.blockCacheHitCount.increment(); + } + + public long getBlockCacheMissCount() { + return blockCacheMissCount.get(); + } + + public void incrementBlockCacheMissCount() { + this.blockCacheMissCount.increment(); + } + + public long getBlockCacheSize() { + return blockCacheSize.get(); + } + + public void incrementBlockCacheSize(long blockCacheSize) { + this.blockCacheSize.add(blockCacheSize); + } + + public long getBlockCacheCount() { + return blockCacheCount.get(); + } + + public void incrementBlockCacheCount() { + this.blockCacheCount.increment(); + } + + public void decrementBlockCacheCount() { + this.blockCacheCount.decrement(); + } + } + + public void incrementHitForTable(TableName tableName) { + if (tableName == null) return; + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) { + val = new MetricsTableCacheValues(); + } + val.incrementBlockCacheHitCount(); + tableCacheMetricsMap.put(tableName, val); + } + + public void incrementMissForTable(TableName tableName) { + if (tableName == null) return; + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) { + val = new MetricsTableCacheValues(); + } + val.incrementBlockCacheMissCount(); + tableCacheMetricsMap.put(tableName, val); + } + + public void incrementBlockCountForTable(TableName tableName) { + if (tableName == null) return; + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) { + val = new MetricsTableCacheValues(); + } + val.incrementBlockCacheCount(); + tableCacheMetricsMap.put(tableName, val); + } + + public void decrementBlockCountForTable(TableName tableName) { + if (tableName == null) return; + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) { + val = new MetricsTableCacheValues(); + } + val.decrementBlockCacheCount(); + tableCacheMetricsMap.put(tableName, val); + } + + public void incrementBlockSizeForTable(TableName tableName, long size) { + if (tableName == null) return; + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) { + val = new MetricsTableCacheValues(); + } + val.incrementBlockCacheSize(size); + tableCacheMetricsMap.put(tableName, val); + } + + public long getBlockCacheHitCount(TableName tableName) { + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) + return 0; + return val.getBlockCacheHitCount(); + } + + public long getBlockCacheMissCount(TableName tableName) { + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) + return 0; + return val.getBlockCacheMissCount(); + } + + public long getBlockCacheCount(TableName tableName) { + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) + return 0; + return val.getBlockCacheCount(); + } + + public double getBlockCacheHitRatio(TableName tableName) { + return ((float)getBlockCacheHitCount(tableName)/(float)(getBlockCacheHitCount(tableName)+ + getBlockCacheMissCount(tableName))); + } + + public long getBlockCacheSize(TableName tableName) { + MetricsTableCacheValues val = tableCacheMetricsMap.get(tableName); + if (val == null) 
+ return 0; + return val.getBlockCacheSize(); + } + + public Map getTableCacheMetricsMap() { + return tableCacheMetricsMap; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java index 2d773bb..1d631e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CompoundBloomFilter.java @@ -25,6 +25,7 @@ import java.io.IOException; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.nio.ByteBuff; import org.apache.hadoop.hbase.util.BloomFilter; @@ -94,16 +95,18 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase } @Override - public boolean contains(byte[] key, int keyOffset, int keyLength, ByteBuff bloom) { + public boolean contains(byte[] key, int keyOffset, int keyLength, ByteBuff bloom, + TableName tableName) { // We try to store the result in this variable so we can update stats for // testing, but when an error happens, we log a message and return. int block = index.rootBlockContainingKey(key, keyOffset, keyLength); - return checkContains(key, keyOffset, keyLength, block); + return checkContains(key, keyOffset, keyLength, block, tableName); } - private boolean checkContains(byte[] key, int keyOffset, int keyLength, int block) { + private boolean checkContains(byte[] key, int keyOffset, int keyLength, int block, + TableName tableName) { boolean result; if (block < 0) { result = false; // This key is not in the file. @@ -113,7 +116,7 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase // We cache the block and use a positional read. bloomBlock = reader.readBlock(index.getRootBlockOffset(block), index.getRootBlockDataSize(block), true, true, false, true, - BlockType.BLOOM_CHUNK, null); + BlockType.BLOOM_CHUNK, null, tableName); } catch (IOException ex) { // The Bloom filter is broken, turn it off. throw new IllegalArgumentException( @@ -142,7 +145,7 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase } @Override - public boolean contains(Cell keyCell, ByteBuff bloom) { + public boolean contains(Cell keyCell, ByteBuff bloom, TableName tableName) { // We try to store the result in this variable so we can update stats for // testing, but when an error happens, we log a message and return. int block = index.rootBlockContainingKey(keyCell); @@ -151,10 +154,10 @@ public class CompoundBloomFilter extends CompoundBloomFilterBase // with BBs then the Hash.java APIs should also be changed to work with BBs. 
if (keyCell instanceof KeyValue) { return checkContains(((KeyValue) keyCell).getBuffer(), ((KeyValue) keyCell).getKeyOffset(), - ((KeyValue) keyCell).getKeyLength(), block); + ((KeyValue) keyCell).getKeyLength(), block, tableName); } byte[] key = CellUtil.getCellKeySerializedAsKeyValueKey(keyCell); - return checkContains(key, 0, key.length, block); + return checkContains(key, 0, key.length, block, tableName); } public boolean supportsAutoLoading() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 8582dbe..6a99834 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -35,10 +35,8 @@ import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; -import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -49,6 +47,8 @@ import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.compress.Compression; @@ -59,6 +59,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.BytesBytesPair; import org.apache.hadoop.hbase.protobuf.generated.HFileProtos; import org.apache.hadoop.hbase.util.BloomFilterWriter; +import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Counter; import org.apache.hadoop.hbase.util.FSUtils; @@ -298,7 +299,7 @@ public class HFile { } - public Writer create() throws IOException { + public Writer create(TableName tableName) throws IOException { if ((path != null ? 1 : 0) + (ostream != null ? 
1 : 0) != 1) { throw new AssertionError("Please specify exactly one of " + "filesystem/path or path"); @@ -312,7 +313,8 @@ public class HFile { else if (LOG.isDebugEnabled()) LOG.debug("Unable to set drop behind on " + path); } } - return new HFileWriterImpl(conf, cacheConf, path, ostream, comparator, fileContext); + return new HFileWriterImpl(conf, cacheConf, path, ostream, comparator, fileContext, + tableName); } } @@ -383,7 +385,7 @@ public class HFile { HFileBlock readBlock(long offset, long onDiskBlockSize, boolean cacheBlock, final boolean pread, final boolean isCompaction, final boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) + DataBlockEncoding expectedDataBlockEncoding, TableName tableName) throws IOException; /** @@ -494,7 +496,8 @@ public class HFile { @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SF_SWITCH_FALLTHROUGH", justification="Intentional") private static Reader pickReaderVersion(Path path, FSDataInputStreamWrapper fsdis, - long size, CacheConfig cacheConf, HFileSystem hfs, Configuration conf) throws IOException { + long size, CacheConfig cacheConf, HFileSystem hfs, Configuration conf, TableName tableName) + throws IOException { FixedFileTrailer trailer = null; try { boolean isHBaseChecksum = fsdis.shouldUseHBaseChecksum(); @@ -505,7 +508,7 @@ public class HFile { LOG.debug("Opening HFile v2 with v3 reader"); // Fall through. FindBugs: SF_SWITCH_FALLTHROUGH case 3 : - return new HFileReaderImpl(path, trailer, fsdis, size, cacheConf, hfs, conf); + return new HFileReaderImpl(path, trailer, fsdis, size, cacheConf, hfs, conf, tableName); default: throw new IllegalArgumentException("Invalid HFile version " + trailer.getMajorVersion()); } @@ -531,8 +534,8 @@ public class HFile { */ @SuppressWarnings("resource") public static Reader createReader(FileSystem fs, Path path, - FSDataInputStreamWrapper fsdis, long size, CacheConfig cacheConf, Configuration conf) - throws IOException { + FSDataInputStreamWrapper fsdis, long size, CacheConfig cacheConf, Configuration conf, + TableName tableName) throws IOException { HFileSystem hfs = null; // If the fs is not an instance of HFileSystem, then create an @@ -544,7 +547,7 @@ public class HFile { } else { hfs = (HFileSystem)fs; } - return pickReaderVersion(path, fsdis, size, cacheConf, hfs, conf); + return pickReaderVersion(path, fsdis, size, cacheConf, hfs, conf, tableName); } /** @@ -556,11 +559,17 @@ public class HFile { * @throws IOException Will throw a CorruptHFileException (DoNotRetryIOException subtype) if hfile is corrupt/invalid. 
*/ public static Reader createReader( - FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) throws IOException { + FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf, + TableName tableName) throws IOException { Preconditions.checkNotNull(cacheConf, "Cannot create Reader with null CacheConf"); FSDataInputStreamWrapper stream = new FSDataInputStreamWrapper(fs, path); return pickReaderVersion(path, stream, fs.getFileStatus(path).getLen(), - cacheConf, stream.getHfs(), conf); + cacheConf, stream.getHfs(), conf, tableName); + } + + public static Reader createReader( + FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) throws IOException { + return createReader(fs, path, cacheConf, conf, null); } /** @@ -568,9 +577,9 @@ public class HFile { */ static Reader createReaderFromStream(Path path, FSDataInputStream fsdis, long size, CacheConfig cacheConf, Configuration conf) - throws IOException { + throws IOException { FSDataInputStreamWrapper wrapper = new FSDataInputStreamWrapper(fsdis); - return pickReaderVersion(path, wrapper, size, cacheConf, null, conf); + return pickReaderVersion(path, wrapper, size, cacheConf, null, conf, null); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 66968e0..921d028 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValue.KeyOnlyKeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -110,7 +111,7 @@ public class HFileBlockIndex { private byte[][] blockKeys; public ByteArrayKeyBlockIndexReader(final int treeLevel, - final CachingBlockReader cachingBlockReader) { + final CachingBlockReader cachingBlockReader, final TableName tableName) { this(treeLevel); this.cachingBlockReader = cachingBlockReader; } @@ -214,7 +215,6 @@ public class HFileBlockIndex { } return sb.toString(); } - } /** @@ -229,11 +229,18 @@ public class HFileBlockIndex { private AtomicReference midKey = new AtomicReference(); /** Needed doing lookup on blocks. 
*/ private CellComparator comparator; + private TableName tableName; public CellBasedKeyBlockIndexReader(final CellComparator c, final int treeLevel, final CachingBlockReader cachingBlockReader) { + this(c, treeLevel, cachingBlockReader, null); + } + + public CellBasedKeyBlockIndexReader(final CellComparator c, final int treeLevel, + final CachingBlockReader cachingBlockReader, final TableName tableName) { this(c, treeLevel); this.cachingBlockReader = cachingBlockReader; + this.tableName = tableName; } public CellBasedKeyBlockIndexReader(final CellComparator c, final int treeLevel) { @@ -321,7 +328,7 @@ public class HFileBlockIndex { } block = cachingBlockReader.readBlock(currentOffset, currentOnDiskSize, shouldCache, pread, - isCompaction, true, expectedBlockType, expectedDataBlockEncoding); + isCompaction, true, expectedBlockType, expectedDataBlockEncoding, tableName); } if (block == null) { @@ -406,7 +413,7 @@ public class HFileBlockIndex { // Caching, using pread, assuming this is not a compaction. HFileBlock midLeafBlock = cachingBlockReader.readBlock( midLeafBlockOffset, midLeafBlockOnDiskSize, true, true, false, true, - BlockType.LEAF_INDEX, null); + BlockType.LEAF_INDEX, null, tableName); try { ByteBuff b = midLeafBlock.getBufferWithoutHeader(); int numDataBlocks = b.getIntAfterPosition(0); @@ -993,9 +1000,11 @@ public class HFileBlockIndex { /** Name to use for computing cache keys */ private String nameForCaching; + private TableName tableName; + /** Creates a single-level block index writer */ public BlockIndexWriter() { - this(null, null, null); + this(null, null, null, null); singleLevelOnly = true; } @@ -1006,7 +1015,7 @@ public class HFileBlockIndex { * @param cacheConf used to determine when and how a block should be cached-on-write. 
*/ public BlockIndexWriter(HFileBlock.Writer blockWriter, - CacheConfig cacheConf, String nameForCaching) { + CacheConfig cacheConf, String nameForCaching, TableName tableName) { if ((cacheConf == null) != (nameForCaching == null)) { throw new IllegalArgumentException("Block cache and file name for " + "caching must be both specified or both null"); } this.blockWriter = blockWriter; this.cacheConf = cacheConf; this.nameForCaching = nameForCaching; this.maxChunkSize = HFileBlockIndex.DEFAULT_MAX_CHUNK_SIZE; + this.tableName = tableName; } public void setMaxChunkSize(int maxChunkSize) { @@ -1075,7 +1085,7 @@ if (cacheConf != null) { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching, - rootLevelIndexPos, true, blockForCaching.getBlockType()), blockForCaching); + rootLevelIndexPos, true, blockForCaching.getBlockType(), tableName), blockForCaching); } } @@ -1175,7 +1185,7 @@ if (getCacheOnWrite()) { HFileBlock blockForCaching = blockWriter.getBlockForCaching(cacheConf); cacheConf.getBlockCache().cacheBlock(new BlockCacheKey(nameForCaching, - beginOffset, true, blockForCaching.getBlockType()), blockForCaching); + beginOffset, true, blockForCaching.getBlockType(), tableName), blockForCaching); } // Add intermediate index block size diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index 36067e5..3e09e70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -350,7 +350,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { HFileBlock block; while (offset <= max) { block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false, - /* isCompaction */ false, /* updateCacheMetrics */ false, null, null); + /* isCompaction */ false, /* updateCacheMetrics */ false, null, null, null); offset += block.getOnDiskSizeWithHeader(); out.println(block); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java index fc1c04e..dcbef5c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.OffheapKeyValue; import org.apache.hadoop.hbase.SizeCachedKeyValue; import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; @@ -140,6 +141,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { */ private List loadOnOpenBlocks = new ArrayList(); + private TableName tableName; + /** Minimum minor version supported by this HFile format */ static final int MIN_MINOR_VERSION = 0; @@ -178,7 +181,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { public HFileReaderImpl(final Path path, FixedFileTrailer trailer, final FSDataInputStreamWrapper fsdis, final long fileSize, final
CacheConfig cacheConf, final HFileSystem hfs, - final Configuration conf) + final Configuration conf, final TableName tableName) throws IOException { this.trailer = trailer; this.compressAlgo = trailer.getCompressionCodec(); @@ -190,11 +193,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { checkFileVersion(); this.hfileContext = createHFileContext(fsdis, fileSize, hfs, path, trailer); this.fsBlockReader = new HFileBlock.FSReaderImpl(fsdis, fileSize, hfs, path, hfileContext); + this.tableName = tableName; // Comparator class name is stored in the trailer in version 2. comparator = trailer.createComparator(); dataBlockIndexReader = new HFileBlockIndex.CellBasedKeyBlockIndexReader(comparator, - trailer.getNumDataIndexLevels(), this); + trailer.getNumDataIndexLevels(), this, tableName); metaBlockIndexReader = new HFileBlockIndex.ByteArrayKeyBlockIndexReader(1); // Parse load-on-open data. @@ -266,7 +270,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // cached block. This 'optimization' triggers extremely rarely I'd say. long onDiskSize = prevBlock != null? prevBlock.getNextBlockOnDiskSize(): -1; HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false, - null, null); + null, null, tableName); // Need not update the current block. Ideally here the readBlock won't find the // block in cache. We call this readBlock so that block data is read from FS and // cached in BC. So there is no reference count increment that happens here. @@ -365,7 +369,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { BlockCache blockCache = this.cacheConf.getBlockCache(); if (blockCache != null && block != null) { BlockCacheKey cacheKey = new BlockCacheKey(this.getFileContext().getHFileName(), - block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType()); + block.getOffset(), this.isPrimaryReplicaReader(), block.getBlockType(), + tableName); blockCache.returnBlock(cacheKey, block); } } @@ -507,13 +512,15 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { protected HFileBlock curBlock; // Previous blocks that were used in the course of the read protected final ArrayList prevBlocks = new ArrayList(); + protected TableName tableName; public HFileScannerImpl(final HFile.Reader reader, final boolean cacheBlocks, - final boolean pread, final boolean isCompaction) { + final boolean pread, final boolean isCompaction, TableName tableName) { this.reader = reader; this.cacheBlocks = cacheBlocks; this.pread = pread; this.isCompaction = isCompaction; + this.tableName = tableName; } void updateCurrBlockRef(HFileBlock block) { @@ -876,7 +883,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { int prevBlockSize = -1; seekToBlock = reader.readBlock(previousBlockOffset, prevBlockSize, cacheBlocks, - pread, isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); + pread, isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding(), + tableName); // TODO shortcut: seek forward in this block to the last key of the // block. } @@ -913,7 +921,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // it might turn out to be a non-data block.
block = reader.readBlock(block.getOffset() + block.getOnDiskSizeWithHeader(), block.getNextBlockOnDiskSize(), cacheBlocks, pread, - isCompaction, true, null, getEffectiveDataBlockEncoding()); + isCompaction, true, null, getEffectiveDataBlockEncoding(), tableName); if (block != null && !block.getBlockType().isData()) { // Findbugs: NP_NULL_ON_SOME_PATH // Whatever block we read we will be returning it unless // it is a datablock. Just in case the blocks are non data blocks @@ -1114,10 +1122,10 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { return true; } - protected void readAndUpdateNewBlock(long firstDataBlockOffset) throws IOException, - CorruptHFileException { + protected void readAndUpdateNewBlock(long firstDataBlockOffset) + throws IOException, CorruptHFileException { HFileBlock newBlock = reader.readBlock(firstDataBlockOffset, -1, cacheBlocks, pread, - isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding()); + isCompaction, true, BlockType.DATA, getEffectiveDataBlockEncoding(), tableName); if (newBlock.getOffset() < 0) { throw new IOException("Invalid block offset: " + newBlock.getOffset()); } @@ -1365,7 +1373,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // Check cache for block. If found return. long metaBlockOffset = metaBlockIndexReader.getRootBlockOffset(block); BlockCacheKey cacheKey = new BlockCacheKey(name, metaBlockOffset, - this.isPrimaryReplicaReader(), BlockType.META); + this.isPrimaryReplicaReader(), BlockType.META, tableName); cacheBlock &= cacheConf.shouldCacheBlockOnRead(BlockType.META.getCategory()); if (cacheConf.isBlockCacheEnabled()) { @@ -1397,7 +1405,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { public HFileBlock readBlock(long dataBlockOffset, long onDiskBlockSize, final boolean cacheBlock, boolean pread, final boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) + DataBlockEncoding expectedDataBlockEncoding, TableName tableName) throws IOException { if (dataBlockIndexReader == null) { throw new IOException("Block index not loaded"); @@ -1415,7 +1423,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { // from doing). 
BlockCacheKey cacheKey = new BlockCacheKey(name, dataBlockOffset, - this.isPrimaryReplicaReader(), expectedBlockType); + this.isPrimaryReplicaReader(), expectedBlockType, tableName); boolean useLock = false; IdLock.Entry lockEntry = null; @@ -1580,8 +1588,8 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { private final DataBlockEncoder dataBlockEncoder; public EncodedScanner(HFile.Reader reader, boolean cacheBlocks, - boolean pread, boolean isCompaction, HFileContext meta) { - super(reader, cacheBlocks, pread, isCompaction); + boolean pread, boolean isCompaction, HFileContext meta, TableName tableName) { + super(reader, cacheBlocks, pread, isCompaction, tableName); DataBlockEncoding encoding = reader.getDataBlockEncoding(); dataBlockEncoder = encoding.getEncoder(); decodingCtx = dataBlockEncoder.newDataBlockDecodingContext(meta); @@ -1835,12 +1843,18 @@ public class HFileReaderImpl implements HFile.Reader, Configurable { public HFileScanner getScanner(boolean cacheBlocks, final boolean pread, final boolean isCompaction) { if (dataBlockEncoder.useEncodedScanner()) { - return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext); + return new EncodedScanner(this, cacheBlocks, pread, isCompaction, this.hfileContext, + this.tableName); } - return new HFileScannerImpl(this, cacheBlocks, pread, isCompaction); + return new HFileScannerImpl(this, cacheBlocks, pread, isCompaction, + this.tableName); } public int getMajorVersion() { return 3; } + + public TableName getTableName() { + return tableName; + } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 5769744..16ede96 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -34,9 +34,10 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellComparator.MetaCellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.CellComparator.MetaCellComparator; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.crypto.Encryption; @@ -153,9 +154,11 @@ public class HFileWriterImpl implements HFile.Writer { protected long maxMemstoreTS = 0; + private TableName tableName; + public HFileWriterImpl(final Configuration conf, CacheConfig cacheConf, Path path, FSDataOutputStream outputStream, - CellComparator comparator, HFileContext fileContext) { + CellComparator comparator, HFileContext fileContext, TableName tableName) { this.outputStream = outputStream; this.path = path; this.name = path != null ? path.getName() : outputStream.toString(); @@ -171,6 +174,7 @@ public class HFileWriterImpl implements HFile.Writer { closeOutputStream = path != null; this.cacheConf = cacheConf; finishInit(conf); + this.tableName = tableName; if (LOG.isTraceEnabled()) { LOG.trace("Writer" + (path != null ?
" for " + path : "") + " initialized with cacheConf: " + cacheConf + @@ -284,7 +288,7 @@ public class HFileWriterImpl implements HFile.Writer { boolean cacheIndexesOnWrite = cacheConf.shouldCacheIndexesOnWrite(); dataBlockIndexWriter = new HFileBlockIndex.BlockIndexWriter(blockWriter, cacheIndexesOnWrite ? cacheConf : null, - cacheIndexesOnWrite ? name : null); + cacheIndexesOnWrite ? name : null, tableName); dataBlockIndexWriter.setMaxChunkSize( HFileBlockIndex.getMaxChunkSize(conf)); inlineBlockWriters.add(dataBlockIndexWriter); @@ -484,7 +488,7 @@ public class HFileWriterImpl implements HFile.Writer { private void doCacheOnWrite(long offset) { HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf); cacheConf.getBlockCache().cacheBlock( - new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()), + new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType(), tableName), cacheFormatBlock); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index 41b46f2..528e766 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -37,6 +37,7 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -361,6 +362,8 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { long newSize = updateSizeMetrics(cb, false); map.put(cacheKey, cb); long val = elements.incrementAndGet(); + stats.incrementBlockCountForTable(cacheKey.getTableName()); + stats.incrementBlockSizeForTable(cacheKey.getTableName(), cb.heapSize()); if (LOG.isTraceEnabled()) { long size = map.size(); assertCounterSanity(size, val); @@ -440,6 +443,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { if (cb == null) { if (!repeat && updateCacheMetrics) { stats.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + stats.incrementMissForTable(cacheKey.getTableName()); } // If there is another block cache then try and read there. 
// However if this is a retry ( second time in double checked locking ) @@ -455,7 +459,10 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { } return null; } - if (updateCacheMetrics) stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + if (updateCacheMetrics) { + stats.hit(caching, cacheKey.isPrimary(), cacheKey.getBlockType()); + stats.incrementHitForTable(cacheKey.getTableName()); + } cb.access(count.incrementAndGet()); return cb.getBuffer(); } @@ -516,6 +523,9 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { } updateSizeMetrics(block, true); long val = elements.decrementAndGet(); + stats.decrementBlockCountForTable(block.getCacheKey().getTableName()); + stats.incrementBlockSizeForTable(block.getCacheKey().getTableName(), + -1 * ((LruCachedBlock)block).heapSize()); if (LOG.isTraceEnabled()) { long size = map.size(); assertCounterSanity(size, val); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 54dbf89..a89701a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -390,6 +390,8 @@ public class BucketCache implements BlockCache, HeapSize { this.blockNumber.incrementAndGet(); this.heapSize.addAndGet(cachedItem.heapSize()); blocksByHFile.add(cacheKey); + cacheStats.incrementBlockCountForTable(cacheKey.getTableName()); + cacheStats.incrementBlockSizeForTable(cacheKey.getTableName(), cachedItem.heapSize()); } } @@ -411,6 +413,7 @@ public class BucketCache implements BlockCache, HeapSize { if (re != null) { if (updateCacheMetrics) { cacheStats.hit(caching, key.isPrimary(), key.getBlockType()); + cacheStats.incrementHitForTable(key.getTableName()); } re.access(accessCount.incrementAndGet()); return re.getData(); @@ -437,6 +440,7 @@ public class BucketCache implements BlockCache, HeapSize { if (updateCacheMetrics) { cacheStats.hit(caching, key.isPrimary(), key.getBlockType()); cacheStats.ioHit(timeTaken); + cacheStats.incrementHitForTable(key.getTableName()); } if (cachedBlock.getMemoryType() == MemoryType.SHARED) { bucketEntry.refCount.incrementAndGet(); @@ -456,6 +460,7 @@ public class BucketCache implements BlockCache, HeapSize { } if (!repeat && updateCacheMetrics) { cacheStats.miss(caching, key.isPrimary(), key.getBlockType()); + cacheStats.incrementMissForTable(key.getTableName()); } return null; } @@ -465,8 +470,10 @@ public class BucketCache implements BlockCache, HeapSize { bucketAllocator.freeBlock(bucketEntry.offset()); realCacheSize.addAndGet(-1 * bucketEntry.getLength()); blocksByHFile.remove(cacheKey); + cacheStats.incrementBlockSizeForTable(cacheKey.getTableName(), -1 * bucketEntry.getLength()); if (decrementBlockNumber) { - this.blockNumber.decrementAndGet(); + this.blockNumber.decrementAndGet(); + cacheStats.decrementBlockCountForTable(cacheKey.getTableName()); } } @@ -511,6 +518,8 @@ public class BucketCache implements BlockCache, HeapSize { if (removedBlock != null) { this.blockNumber.decrementAndGet(); this.heapSize.addAndGet(-1 * removedBlock.getData().heapSize()); + cacheStats.decrementBlockCountForTable(cacheKey.getTableName()); + cacheStats.incrementBlockSizeForTable(cacheKey.getTableName(), -1 * removedBlock.getData().heapSize()); } return removedBlock; } diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index b8a5475..71cf0b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -313,13 +313,13 @@ public class HFileOutputFormat2 wl.writer = new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), fs) .withOutputDir(familydir).withBloomType(bloomType) - .withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext).build(); + .withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext).build(null); } else { wl.writer = new StoreFileWriter.Builder(conf, new CacheConfig(tempConf), new HFileSystem(fs)) .withOutputDir(familydir).withBloomType(bloomType) .withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext) - .withFavoredNodes(favoredNodes).build(); + .withFavoredNodes(favoredNodes).build(null); } this.writers.put(family, wl); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java index a23d739..745651a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java @@ -892,7 +892,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { HalfStoreFileReader halfReader = null; StoreFileWriter halfWriter = null; try { - halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf); + halfReader = new HalfStoreFileReader(fs, inFile, cacheConf, reference, conf, null); Map fileInfo = halfReader.loadFileInfo(); int blocksize = familyDescriptor.getBlocksize(); @@ -911,7 +911,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool { .withFilePath(outFile) .withBloomType(bloomFilterType) .withFileContext(hFileContext) - .build(); + .build(null); HFileScanner scanner = halfReader.getScanner(false, false, false); scanner.seekTo(); do { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java index ecd2415..02a22ae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java @@ -500,7 +500,7 @@ public final class MobUtils { Path tempPath = new Path(basePath, UUID.randomUUID().toString().replaceAll("-", "")); StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConfig, fs).withFilePath(tempPath) .withComparator(CellComparator.COMPARATOR).withBloomType(family.getBloomFilterType()) - .withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build(); + .withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build(null); return w; } @@ -587,7 +587,7 @@ public final class MobUtils { StoreFileWriter w = new StoreFileWriter.Builder(conf, cacheConfig, fs) .withFilePath(new Path(basePath, mobFileName.getFileName())) .withComparator(CellComparator.COMPARATOR).withBloomType(BloomType.NONE) - .withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build(); + .withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build(null); return w; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java index 8634e37..6f0c24e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java @@ -262,7 +262,7 @@ public class HMobStore extends HStore { StoreFileWriter w = new StoreFileWriter.Builder(conf, writerCacheConf, region.getFilesystem()) .withFilePath(new Path(basePath, mobFileName.getFileName())) .withComparator(CellComparator.COMPARATOR).withBloomType(BloomType.NONE) - .withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build(); + .withMaxKeyCount(maxKeyCount).withFileContext(hFileContext).build(null); return w; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 9f6a03a..9b7034d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -4028,7 +4028,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi Set fakeStoreFiles = new HashSet(files.size()); for (Path file: files) { fakeStoreFiles.add(new StoreFile(getRegionFileSystem().getFileSystem(), file, this.conf, - null, null)); + null, null, getTableDesc().getTableName())); } getRegionFileSystem().removeStoreFiles(fakeFamilyName, fakeStoreFiles); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 4b79153..869fd9f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -609,7 +609,7 @@ public class HStore implements Store { } private StoreFile createStoreFileAndReader(final Path p) throws IOException { - StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p); + StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p, getTableName()); return createStoreFileAndReader(info); } @@ -617,7 +617,7 @@ public class HStore implements Store { throws IOException { info.setRegionCoprocessorHost(this.region.getCoprocessorHost()); StoreFile storeFile = new StoreFile(this.getFileSystem(), info, this.conf, this.cacheConf, - this.family.getBloomFilterType()); + this.family.getBloomFilterType(), region.getRegionInfo().getTable()); StoreFileReader r = storeFile.createReader(); r.setReplicaStoreFile(isPrimaryReplicaStore()); return storeFile; @@ -668,7 +668,7 @@ public class HStore implements Store { LOG.info("Validating hfile at " + srcPath + " for inclusion in " + "store " + this + " region " + this.getRegionInfo().getRegionNameAsString()); reader = HFile.createReader(srcPath.getFileSystem(conf), - srcPath, cacheConf, conf); + srcPath, cacheConf, conf, region.getRegionInfo().getTable()); reader.loadFileInfo(); byte[] firstKey = reader.getFirstRowKey(); @@ -1002,7 +1002,7 @@ public class HStore implements Store { if (trt != null) { builder.withTimeRangeTracker(trt); } - return builder.build(); + return builder.build(getTableName()); } private HFileContext createFileContext(Compression.Algorithm compression, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java index c5f0f7b..bccd92b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver; import java.io.Closeable; import java.io.IOException; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -32,6 +33,9 @@ import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.io.hfile.BlockCache; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; +import org.apache.hadoop.hbase.io.hfile.CacheStats; import org.apache.hadoop.metrics2.MetricsExecutor; import com.google.common.collect.Sets; @@ -43,10 +47,13 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr private Runnable runnable; private long period; private ScheduledFuture tableMetricsUpdateTask; + private BlockCache blockCache; + private CacheStats cacheStats; private ConcurrentHashMap metricsTableMap = new ConcurrentHashMap<>(); public MetricsTableWrapperAggregateImpl(final HRegionServer regionServer) { this.regionServer = regionServer; + initBlockCache(); this.period = regionServer.conf.getLong(HConstants.REGIONSERVER_METRICS_PERIOD, HConstants.DEFAULT_REGIONSERVER_METRICS_PERIOD) + 1000; this.executor = CompatibilitySingletonFactory.getInstance(MetricsExecutor.class).getExecutor(); @@ -55,11 +62,28 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr TimeUnit.MILLISECONDS); } + /** + * It's possible that due to threading the block cache could not be initialized + * yet (testing multiple region servers in one jvm). So we need to try and initialize + * the blockCache and cacheStats reference multiple times until we succeed. 
+ */ + private synchronized void initBlockCache() { + CacheConfig cacheConfig = this.regionServer.cacheConfig; + if (cacheConfig != null && this.blockCache == null) { + this.blockCache = cacheConfig.getBlockCache(); + } + + if (this.blockCache != null && this.cacheStats == null) { + this.cacheStats = blockCache.getStats(); + } + } + public class TableMetricsWrapperRunnable implements Runnable { @Override public void run() { Map localMetricsTableMap = new HashMap<>(); + initBlockCache(); for (Region r : regionServer.getOnlineRegionsLocalContext()) { TableName tbl= r.getTableDesc().getTableName(); @@ -160,6 +184,46 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr } @Override + public long getBlockCacheCount(String table) { + if (cacheStats == null) + return 0; + return cacheStats.getBlockCacheCount(TableName.valueOf(table)); + } + + @Override + public long getBlockCacheSize(String table) { + if (cacheStats == null) + return 0; + return cacheStats.getBlockCacheSize(TableName.valueOf(table)); + } + + @Override + public long getBlockCacheHitCount(String table) { + if (cacheStats == null) + return 0; + return cacheStats.getBlockCacheHitCount(TableName.valueOf(table)); + } + + @Override + public long getBlockCacheMissCount(String table) { + if (cacheStats == null) + return 0; + return cacheStats.getBlockCacheMissCount(TableName.valueOf(table)); + } + + @Override + public double getBlockCacheHitPercent(String table) { + if (this.cacheStats == null) { + return 0; + } + double ratio = this.cacheStats.getBlockCacheHitRatio(TableName.valueOf(table)); + if (Double.isNaN(ratio)) { + ratio = 0; + } + return (ratio * 100); + } + + @Override public void close() throws IOException { tableMetricsUpdateTask.cancel(true); } @@ -221,5 +285,4 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr this.tableSize = tableSize; } } - } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 589d844..02c820b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -18,11 +18,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Function; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Ordering; - import java.io.IOException; import java.util.Collection; import java.util.Collections; @@ -40,6 +35,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.BlockType; import org.apache.hadoop.hbase.io.hfile.CacheConfig; @@ -47,6 +43,11 @@ import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Ordering; + /** * A Store data file. Stores usually have one or more of these files. They * are produced by flushing the memstore to disk. 
To @@ -118,6 +119,8 @@ public class StoreFile { private Comparator comparator; + private static TableName tableName; + CacheConfig getCacheConf() { return cacheConf; } @@ -195,7 +198,19 @@ public class StoreFile { */ public StoreFile(final FileSystem fs, final Path p, final Configuration conf, final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException { - this(fs, new StoreFileInfo(conf, fs, p), conf, cacheConf, cfBloomType); + this(fs, new StoreFileInfo(conf, fs, p, null), conf, cacheConf, cfBloomType, null); + } + + public StoreFile(final FileSystem fs, final Path p, final Configuration conf, + final CacheConfig cacheConf, final BloomType cfBloomType, TableName tableName) + throws IOException { + this(fs, new StoreFileInfo(conf, fs, p, tableName), conf, cacheConf, cfBloomType, tableName); + } + + public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, + final Configuration conf, final CacheConfig cacheConf, final BloomType cfBloomType) + throws IOException { + this(fs, fileInfo, conf, cacheConf, cfBloomType, null); } /** @@ -214,7 +229,8 @@ public class StoreFile { * @throws IOException When opening the reader fails. */ public StoreFile(final FileSystem fs, final StoreFileInfo fileInfo, final Configuration conf, - final CacheConfig cacheConf, final BloomType cfBloomType) throws IOException { + final CacheConfig cacheConf, final BloomType cfBloomType, TableName tableName) + throws IOException { this.fs = fs; this.fileInfo = fileInfo; this.cacheConf = cacheConf; @@ -226,6 +242,7 @@ public class StoreFile { "cfBloomType=" + cfBloomType + " (disabled in config)"); this.cfBloomType = BloomType.NONE; } + this.tableName = tableName; } /** @@ -238,6 +255,7 @@ public class StoreFile { this.cacheConf = other.cacheConf; this.cfBloomType = other.cfBloomType; this.metadataMap = other.metadataMap; + this.tableName = other.tableName; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java index 3c12045..d34cb36 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java @@ -26,12 +26,13 @@ import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; import org.apache.hadoop.hbase.io.HFileLink; import org.apache.hadoop.hbase.io.HalfStoreFileReader; @@ -100,13 +101,16 @@ public class StoreFileInfo { // timestamp on when the file was created, is 0 and ignored for reference or link files private long createdTimestamp; + private TableName tableName; + /** * Create a Store File Info * @param conf the {@link Configuration} to use * @param fs The current file system to use. 
* @param initialPath The {@link Path} of the file */ - public StoreFileInfo(final Configuration conf, final FileSystem fs, final Path initialPath) + public StoreFileInfo(final Configuration conf, final FileSystem fs, final Path initialPath, + TableName tableName) throws IOException { assert fs != null; assert initialPath != null; @@ -115,6 +119,7 @@ public class StoreFileInfo { this.fs = fs; this.conf = conf; this.initialPath = initialPath; + this.tableName = tableName; Path p = initialPath; if (HFileLink.isHFileLink(p)) { // HFileLink @@ -151,7 +156,7 @@ public class StoreFileInfo { */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus) throws IOException { - this(conf, fs, fileStatus.getPath()); + this(conf, fs, fileStatus.getPath(), null); } /** @@ -161,7 +166,7 @@ public class StoreFileInfo { * @param fileStatus The {@link FileStatus} of the file */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final HFileLink link) + final HFileLink link, final TableName tableName) throws IOException { this.fs = fs; this.conf = conf; @@ -170,6 +175,7 @@ public class StoreFileInfo { // HFileLink this.reference = null; this.link = link; + this.tableName = tableName; } /** @@ -181,7 +187,7 @@ public class StoreFileInfo { * @throws IOException */ public StoreFileInfo(final Configuration conf, final FileSystem fs, final FileStatus fileStatus, - final Reference reference) + final Reference reference, final TableName tableName) throws IOException { this.fs = fs; this.conf = conf; @@ -189,6 +195,7 @@ public class StoreFileInfo { this.createdTimestamp = fileStatus.getModificationTime(); this.reference = reference; this.link = null; + this.tableName = tableName; } /** @@ -265,9 +272,9 @@ public class StoreFileInfo { if (reader == null) { if (this.reference != null) { reader = new HalfStoreFileReader(fs, this.getPath(), in, length, cacheConf, reference, - conf); + conf, tableName); } else { - reader = new StoreFileReader(fs, status.getPath(), in, length, cacheConf, conf); + reader = new StoreFileReader(fs, status.getPath(), in, length, cacheConf, conf, tableName); } } if (this.coprocessorHost != null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java index 1582237..8251a6f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java @@ -18,6 +18,12 @@ */ package org.apache.hadoop.hbase.regionserver; +import java.io.DataInput; +import java.io.IOException; +import java.util.Map; +import java.util.SortedSet; +import java.util.concurrent.atomic.AtomicInteger; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -29,6 +35,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper; @@ -43,12 +50,6 @@ import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.Bytes; -import 
java.io.DataInput; -import java.io.IOException; -import java.util.Map; -import java.util.SortedSet; -import java.util.concurrent.atomic.AtomicInteger; - /** * Reader for a StoreFile. */ @@ -67,6 +68,7 @@ public class StoreFileReader { private boolean bulkLoadResult = false; private KeyValue.KeyOnlyKeyValue lastBloomKeyOnlyKV = null; private boolean skipResetSeqId = true; + private TableName tableName; public AtomicInteger getRefCount() { return refCount; @@ -79,10 +81,11 @@ public class StoreFileReader { // Indicates if the file got compacted private volatile boolean compactedAway = false; - public StoreFileReader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf) - throws IOException { - reader = HFile.createReader(fs, path, cacheConf, conf); + public StoreFileReader(FileSystem fs, Path path, CacheConfig cacheConf, Configuration conf, + TableName tableName) throws IOException { + reader = HFile.createReader(fs, path, cacheConf, conf, tableName); bloomFilterType = BloomType.NONE; + this.tableName = tableName; } void markCompactedAway() { @@ -90,9 +93,10 @@ public class StoreFileReader { } public StoreFileReader(FileSystem fs, Path path, FSDataInputStreamWrapper in, long size, - CacheConfig cacheConf, Configuration conf) throws IOException { - reader = HFile.createReader(fs, path, in, size, cacheConf, conf); + CacheConfig cacheConf, Configuration conf, TableName tableName) throws IOException { + reader = HFile.createReader(fs, path, in, size, cacheConf, conf, tableName); bloomFilterType = BloomType.NONE; + this.tableName = tableName; } public void setReplicaStoreFile(boolean isPrimaryReplicaStoreFile) { @@ -294,7 +298,7 @@ public class StoreFileReader { if (!bloomFilter.supportsAutoLoading()) { return true; } - return bloomFilter.contains(row, rowOffset, rowLen, null); + return bloomFilter.contains(row, rowOffset, rowLen, null, tableName); } catch (IllegalArgumentException e) { LOG.error("Bad Delete Family bloom filter data -- proceeding without", e); @@ -398,12 +402,12 @@ public class StoreFileReader { exists = false; } else { exists = - bloomFilter.contains(kvKey, bloom) || - bloomFilter.contains(rowBloomKey, bloom); + bloomFilter.contains(kvKey, bloom, tableName) || + bloomFilter.contains(rowBloomKey, bloom, tableName); } } else { exists = !keyIsAfterLast - && bloomFilter.contains(key, 0, key.length, bloom); + && bloomFilter.contains(key, 0, key.length, bloom, tableName); } return exists; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java index 442b90d..ddea756 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java @@ -18,7 +18,10 @@ */ package org.apache.hadoop.hbase.regionserver; -import com.google.common.base.Preconditions; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Arrays; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -30,6 +33,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.hfile.CacheConfig; import 
org.apache.hadoop.hbase.io.hfile.HFile; @@ -40,9 +44,7 @@ import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.WritableUtils; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.Arrays; +import com.google.common.base.Preconditions; /** * A StoreFile writer. Use this to read/write HBase Store Files. It is package @@ -89,10 +91,10 @@ public class StoreFileWriter implements Compactor.CellSink { */ StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf, final CellComparator comparator, BloomType bloomType, long maxKeys, - InetSocketAddress[] favoredNodes, HFileContext fileContext) + InetSocketAddress[] favoredNodes, HFileContext fileContext, TimeRangeTracker trt) throws IOException { this(fs, path, conf, cacheConf, comparator, bloomType, maxKeys, favoredNodes, fileContext, - null); + trt, null); } /** @@ -114,7 +116,7 @@ public class StoreFileWriter implements Compactor.CellSink { CacheConfig cacheConf, final CellComparator comparator, BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext, - final TimeRangeTracker trt) + final TimeRangeTracker trt, TableName tableName) throws IOException { // If passed a TimeRangeTracker, use it. Set timeRangeTrackerSet so we don't destroy it. // TODO: put the state of the TRT on the TRT; i.e. make a read-only version (TimeRange) when @@ -126,7 +128,7 @@ public class StoreFileWriter implements Compactor.CellSink { .withComparator(comparator) .withFavoredNodes(favoredNodes) .withFileContext(fileContext) - .create(); + .create(tableName); generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite( conf, cacheConf, bloomType, @@ -518,7 +520,7 @@ public class StoreFileWriter implements Compactor.CellSink { * done. If metadata, add BEFORE closing using * {@link StoreFileWriter#appendMetadata}. */ - public StoreFileWriter build() throws IOException { + public StoreFileWriter build(TableName tableName) throws IOException { if ((dir == null ? 0 : 1) + (filePath == null ? 0 : 1) != 1) { throw new IllegalArgumentException("Either specify parent directory " + "or file path"); @@ -543,7 +545,8 @@ public class StoreFileWriter implements Compactor.CellSink { comparator = CellComparator.COMPARATOR; } return new StoreFileWriter(fs, filePath, - conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext, trt); + conf, cacheConf, comparator, bloomType, maxKeyCount, favoredNodes, fileContext, trt, + tableName); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java index 2062244..2140ceb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilter.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.util; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.nio.ByteBuff; @@ -83,7 +84,7 @@ public interface BloomFilter extends BloomFilterBase { * is supported. * @return true if matched by bloom, false if not */ - boolean contains(Cell keyCell, ByteBuff bloom); + boolean contains(Cell keyCell, ByteBuff bloom, TableName tableName); /** * Check if the specified key is contained in the bloom filter. 
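The `BloomFilter.contains(...)` overload changed above (and the second overload in the next hunk) gains a trailing `TableName` parameter so bloom lookups can attribute block-cache activity to a table. Below is a minimal caller sketch assuming the widened signatures from this patch; the helper class and method name are illustrative, not code from the patch.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.BloomFilter;

// Hypothetical helper, not part of the patch.
final class BloomCheckSketch {
  static boolean mightContainRow(BloomFilter bloom, byte[] row, TableName table) {
    // Auto-loading blooms fetch their own blocks, so pass null for the ByteBuff
    // argument, mirroring the StoreFileReader call sites in this patch; a null
    // table simply means "no per-table cache attribution".
    return !bloom.supportsAutoLoading()
        || bloom.contains(row, 0, row.length, null, table);
  }
}
```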
@@ -95,7 +96,7 @@ public interface BloomFilter extends BloomFilterBase { * is supported. * @return true if matched by bloom, false if not */ - boolean contains(byte[] buf, int offset, int length, ByteBuff bloom); + boolean contains(byte[] buf, int offset, int length, ByteBuff bloom, TableName tableName); /** * @return true if this Bloom filter can automatically load its data diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java index ace45ec..2e7569d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java @@ -125,7 +125,7 @@ public class CompressionTest { HFile.Writer writer = HFile.getWriterFactoryNoCache(conf) .withPath(fs, path) .withFileContext(context) - .create(); + .create(null); // Write any-old Cell... final byte [] rowKey = Bytes.toBytes("compressiontestkey"); Cell c = CellUtil.createCell(rowKey, Bytes.toBytes("compressiontestval")); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java index 0655a0f..4c80eaa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java @@ -118,7 +118,7 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { // if this is a primary region, just return the StoreFileInfo constructed from path if (regionInfo.equals(regionInfoForFs)) { - return new StoreFileInfo(conf, fs, path); + return new StoreFileInfo(conf, fs, path, regionInfoForFs.getTable()); } // else create a store file link. The link file does not exists on filesystem though. 
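As the `ServerRegionReplicaUtil` hunks show (continued just below), call sites that know the owning table now pass it into `StoreFileInfo`, while call sites without that context pass null. A hedged sketch of the widened constructor in use; the wrapper class and method are assumptions added for illustration.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;

// Hypothetical call-site sketch, not part of the patch.
final class StoreFileInfoSketch {
  static StoreFileInfo forPrimaryRegion(Configuration conf, FileSystem fs,
      Path storeFilePath, TableName table) throws IOException {
    // The table name rides along so the eventual HFile reader can charge
    // block-cache hits and misses to this table; null keeps untracked reads.
    return new StoreFileInfo(conf, fs, storeFilePath, table);
  }
}
```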
@@ -127,10 +127,10 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil { if (StoreFileInfo.isReference(path)) { Reference reference = Reference.read(fs, path); - return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference); + return new StoreFileInfo(conf, fs, link.getFileStatus(fs), reference, regionInfoForFs.getTable()); } - return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link); + return new StoreFileInfo(conf, fs, link.getFileStatus(fs), link, regionInfoForFs.getTable()); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java index e5aec57..197cf6a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HFilePerformanceEvaluation.java @@ -345,7 +345,7 @@ public class HFilePerformanceEvaluation { .withPath(fs, mf) .withFileContext(hFileContext) .withComparator(CellComparator.COMPARATOR) - .create(); + .create(null); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java index 7bd4f93..66a4773 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java @@ -794,7 +794,7 @@ public class TestRegionObserverInterface { HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, path) .withFileContext(context) - .create(); + .create(null); long now = System.currentTimeMillis(); try { for (int i =1;i<=9;i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java index 0e5f08e..e7dcd38 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHalfStoreFileReader.java @@ -91,7 +91,7 @@ public class TestHalfStoreFileReader { HFile.Writer w = HFile.getWriterFactory(conf, cacheConf) .withPath(fs, p) .withFileContext(meta) - .create(); + .create(null); // write some things. List items = genSomeKeys(); @@ -120,7 +120,7 @@ public class TestHalfStoreFileReader { CacheConfig cacheConf) throws IOException { final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p, - cacheConf, bottom, TEST_UTIL.getConfiguration()); + cacheConf, bottom, TEST_UTIL.getConfiguration(), null); halfreader.loadFileInfo(); final HFileScanner scanner = halfreader.getScanner(false, false); @@ -155,7 +155,7 @@ public class TestHalfStoreFileReader { HFile.Writer w = HFile.getWriterFactory(conf, cacheConf) .withPath(fs, p) .withFileContext(meta) - .create(); + .create(null); // write some things. 
List items = genSomeKeys(); @@ -220,7 +220,7 @@ public class TestHalfStoreFileReader { CacheConfig cacheConfig) throws IOException { final HalfStoreFileReader halfreader = new HalfStoreFileReader(fs, p, - cacheConfig, bottom, TEST_UTIL.getConfiguration()); + cacheConfig, bottom, TEST_UTIL.getConfiguration(), null); halfreader.loadFileInfo(); final HFileScanner scanner = halfreader.getScanner(false, false); scanner.seekBefore(seekBefore); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java index b9cbb16..5fbff9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java @@ -269,7 +269,7 @@ public class TestCacheOnWrite { // Flags: don't cache the block, use pread, this is not a compaction. // Also, pass null for expected block type to avoid checking it. HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, - encodingInCache); + encodingInCache, null); BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); HFileBlock fromCache = (HFileBlock) blockCache.getBlock(blockCacheKey, true, false, true); @@ -372,7 +372,7 @@ public class TestCacheOnWrite { StoreFileWriter sfw = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(storeFileParentDir).withComparator(CellComparator.COMPARATOR) .withFileContext(meta) - .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(); + .withBloomType(BLOOM_TYPE).withMaxKeyCount(NUM_KV).build(null); byte[] cf = Bytes.toBytes("fam"); for (int i = 0; i < NUM_KV; ++i) { byte[] row = RandomKeyValueUtil.randomOrderedKey(rand, i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java index 73e580c..ed2133f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java @@ -94,7 +94,7 @@ public class TestHFile { Path f = new Path(ROOT_DIR, testName.getMethodName()); HFileContext context = new HFileContextBuilder().withIncludesTags(false).build(); Writer w = - HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).withFileContext(context).create(); + HFile.getWriterFactory(conf, cacheConf).withPath(fs, f).withFileContext(context).create(null); w.close(); Reader r = HFile.createReader(fs, f, cacheConf, conf); r.loadFileInfo(); @@ -145,7 +145,7 @@ public class TestHFile { Path f = new Path(ROOT_DIR, testName.getMethodName()); HFileContext context = new HFileContextBuilder().build(); Writer w = HFile.getWriterFactory(conf, cacheConf).withPath(this.fs, f) - .withFileContext(context).create(); + .withFileContext(context).create(null); writeSomeRecords(w, 0, 100, false); w.close(); @@ -254,7 +254,7 @@ public class TestHFile { .withOutputStream(fout) .withFileContext(meta) .withComparator(CellComparator.COMPARATOR) - .create(); + .create(null); LOG.info(writer); writeRecords(writer, useTags); fout.close(); @@ -348,7 +348,7 @@ public class TestHFile { Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(fout) .withFileContext(meta) - .create(); + .create(null); someTestingWithMetaBlock(writer); writer.close(); fout.close(); @@ -383,7 +383,7 @@ public class TestHFile { Writer writer = HFile.getWriterFactory(conf, cacheConf) 
.withOutputStream(fout) .withFileContext(meta) - .create(); + .create(null); KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes()); writer.append(kv); writer.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java index 470d483..8ab9d3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; @@ -98,6 +99,7 @@ public class TestHFileBlockIndex { private final Compression.Algorithm compr; private byte[] firstKeyInFile; private Configuration conf; + private TableName tableName; private static final int[] INDEX_CHUNK_SIZES = { 4096, 512, 384 }; private static final int[] EXPECTED_NUM_LEVELS = { 2, 3, 4 }; @@ -176,7 +178,7 @@ public class TestHFileBlockIndex { public HFileBlock readBlock(long offset, long onDiskSize, boolean cacheBlock, boolean pread, boolean isCompaction, boolean updateCacheMetrics, BlockType expectedBlockType, - DataBlockEncoding expectedDataBlockEncoding) + DataBlockEncoding expectedDataBlockEncoding, TableName tableName) throws IOException { if (offset == prevOffset && onDiskSize == prevOnDiskSize && pread == prevPread) { @@ -189,6 +191,7 @@ public class TestHFileBlockIndex { prevOffset = offset; prevOnDiskSize = onDiskSize; prevPread = pread; + tableName = tableName; return prevBlock; } @@ -267,7 +270,7 @@ public class TestHFileBlockIndex { meta); FSDataOutputStream outputStream = fs.create(path); HFileBlockIndex.BlockIndexWriter biw = - new HFileBlockIndex.BlockIndexWriter(hbw, null, null); + new HFileBlockIndex.BlockIndexWriter(hbw, null, null, tableName); for (int i = 0; i < NUM_DATA_BLOCKS; ++i) { hbw.startWriting(BlockType.DATA).write(String.valueOf(rand.nextInt(1000)).getBytes()); @@ -550,7 +553,7 @@ public class TestHFileBlockIndex { HFile.getWriterFactory(conf, cacheConf) .withPath(fs, hfilePath) .withFileContext(meta) - .create(); + .create(tableName); Random rand = new Random(19231737); byte[] family = Bytes.toBytes("f"); byte[] qualifier = Bytes.toBytes("q"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java index 3264558..b2e85d5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileEncryption.java @@ -168,7 +168,7 @@ public class TestHFileEncryption { HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(out) .withFileContext(fileContext) - .create(); + .create(null); try { KeyValue kv = new KeyValue("foo".getBytes(), "f1".getBytes(), null, "value".getBytes()); writer.append(kv); @@ -216,7 +216,7 @@ public class TestHFileEncryption { HFile.Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(out) .withFileContext(fileContext) - .create(); + .create(null); try { for 
(KeyValue kv: testKvs) { writer.append(kv); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java index af4f2b8..933304b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java @@ -57,7 +57,7 @@ public class TestHFileInlineToRootChunkConversion { HFileContext context = new HFileContextBuilder().withBlockSize(16).build(); HFile.Writer hfw = new HFile.WriterFactory(conf, cacheConf) .withFileContext(context) - .withPath(fs, hfPath).create(); + .withPath(fs, hfPath).create(null); List keys = new ArrayList(); StringBuilder sb = new StringBuilder(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java index 5cc2580..00a083a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileSeek.java @@ -138,7 +138,7 @@ public class TestHFileSeek extends TestCase { .withOutputStream(fout) .withFileContext(context) .withComparator(CellComparator.COMPARATOR) - .create(); + .create(null); try { BytesWritable key = new BytesWritable(); BytesWritable val = new BytesWritable(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java index 983ec2f..cd8242c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.java @@ -125,7 +125,7 @@ public class TestHFileWriterV3 { .withPath(fs, hfilePath) .withFileContext(context) .withComparator(CellComparator.COMPARATOR) - .create(); + .create(null); Random rand = new Random(9713312); // Just a fixed seed. List keyValues = new ArrayList(entryCount); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java index 5f73500..9a54688 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestLazyDataBlockDecompression.java @@ -92,7 +92,7 @@ public class TestLazyDataBlockDecompression { HFile.Writer writer = new HFile.WriterFactory(conf, cc) .withPath(fs, path) .withFileContext(cxt) - .create(); + .create(null); // write a bunch of random kv's Random rand = new Random(9713312); // some seed. 
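The reader side is plumbed the same way: `HFile.Reader.readBlock(...)` now takes the table as its last argument, and the test hunks around here pass null. A rough sketch under that assumption; the helper class and method below are illustrative only, not code from the patch.

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.io.hfile.HFileBlock;

// Hypothetical helper, not part of the patch.
final class BlockScanSketch {
  static void touchDataBlocks(HFile.Reader reader, TableName table) throws IOException {
    long offset = 0;
    long end = reader.getTrailer().getLoadOnOpenDataOffset();
    while (offset < end) {
      // The trailing TableName is the new parameter from this patch; callers
      // without table context (most tests here) pass null instead.
      HFileBlock block = reader.readBlock(offset, -1, /* cacheBlock */ true,
          /* pread */ false, /* isCompaction */ false, /* updateCacheMetrics */ true,
          /* expectedBlockType */ null, /* expectedDataBlockEncoding */ null, table);
      offset += block.getOnDiskSizeWithHeader();
    }
  }
}
```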
@@ -118,7 +118,7 @@ public class TestLazyDataBlockDecompression { FixedFileTrailer trailer = FixedFileTrailer.readFromStream(fsdis.getStream(false), fileSize); HFile.Reader reader = new HFileReaderImpl(path, trailer, fsdis, fileSize, cacheConfig, - fsdis.getHfs(), conf); + fsdis.getHfs(), conf, null); reader.loadFileInfo(); long offset = trailer.getFirstDataBlockOffset(), max = trailer.getLastDataBlockOffset(); @@ -126,7 +126,7 @@ public class TestLazyDataBlockDecompression { HFileBlock block; while (offset <= max) { block = reader.readBlock(offset, -1, /* cacheBlock */ true, /* pread */ false, - /* isCompaction */ false, /* updateCacheMetrics */ true, null, null); + /* isCompaction */ false, /* updateCacheMetrics */ true, null, null, null); offset += block.getOnDiskSizeWithHeader(); blocks.add(block); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java index 4c3db03..cdd9a5f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java @@ -17,7 +17,8 @@ */ package org.apache.hadoop.hbase.io.hfile; -import static org.junit.Assert.*; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.IOException; import java.util.Random; @@ -26,16 +27,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.fs.HFileSystem; - import org.apache.hadoop.hbase.regionserver.StoreFileWriter; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; import org.junit.Before; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -92,7 +92,7 @@ public class TestPrefetch { BlockCache blockCache = cacheConf.getBlockCache(); long offset = 0; while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) { - HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null); + HFileBlock block = reader.readBlock(offset, -1, false, true, false, true, null, null, null); BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); boolean isCached = blockCache.getBlock(blockCacheKey, true, false, true) != null; if (block.getBlockType() == BlockType.DATA || @@ -113,7 +113,7 @@ public class TestPrefetch { .withOutputDir(storeFileParentDir) .withComparator(CellComparator.COMPARATOR) .withFileContext(meta) - .build(); + .build(null); final int rowLen = 32; for (int i = 0; i < NUM_KV; ++i) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java index 90e398d..b78c0ab 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestReseekTo.java @@ -67,7 +67,7 @@ public class TestReseekTo { .withFileContext(context) // NOTE: This test is dependent on 
this deprecated nonstandard comparator .withComparator(CellComparator.COMPARATOR) - .create(); + .create(null); int numberOfKeys = 1000; String valueString = "Value"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java index d46af4a..a3db19d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekBeforeWithInlineBlocks.java @@ -120,7 +120,7 @@ public class TestSeekBeforeWithInlineBlocks { .withFilePath(hfilePath) .withFileContext(meta) .withBloomType(bloomType) - .build(); + .build(null); for (int i = 0; i < NUM_KV; i++) { byte[] row = RandomKeyValueUtil.randomOrderedKey(RAND, i); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java index 9ec6dc9..2eaccb2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestSeekTo.java @@ -122,7 +122,7 @@ public class TestSeekTo { Configuration conf = TEST_UTIL.getConfiguration(); HFile.Writer writer = HFile.getWriterFactoryNoCache(conf).withOutputStream(fout) .withFileContext(context) - .withComparator(CellComparator.COMPARATOR).create(); + .withComparator(CellComparator.COMPARATOR).create(null); // 4 bytes * 3 * 2 for each key/value + // 3 for keys, 15 for values = 42 (woot) writer.append(toKV("c", tagUsage)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java index 276fedb..028997f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestCachedMobFile.java @@ -58,7 +58,7 @@ public class TestCachedMobFile extends TestCase{ FileSystem fs = testDir.getFileSystem(conf); HFileContext meta = new HFileContextBuilder().withBlockSize(8*1024).build(); StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(testDir).withFileContext(meta).build(); + .withOutputDir(testDir).withFileContext(meta).build(null); MobTestUtil.writeStoreFile(writer, caseName); CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf); Assert.assertEquals(EXPECTED_REFERENCE_ZERO, cachedMobFile.getReferenceCount()); @@ -80,14 +80,14 @@ public class TestCachedMobFile extends TestCase{ Path outputDir1 = new Path(testDir, FAMILY1); HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); StoreFileWriter writer1 = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(outputDir1).withFileContext(meta).build(); + .withOutputDir(outputDir1).withFileContext(meta).build(null); MobTestUtil.writeStoreFile(writer1, caseName); CachedMobFile cachedMobFile1 = CachedMobFile.create(fs, writer1.getPath(), conf, cacheConf); Path outputDir2 = new Path(testDir, FAMILY2); StoreFileWriter writer2 = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(outputDir2) .withFileContext(meta) - .build(); + .build(null); MobTestUtil.writeStoreFile(writer2, caseName); CachedMobFile cachedMobFile2 = CachedMobFile.create(fs, writer2.getPath(), conf, cacheConf); cachedMobFile1.access(1); @@ -103,7 +103,7 @@ 
public class TestCachedMobFile extends TestCase{ FileSystem fs = testDir.getFileSystem(conf); HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build(); StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withOutputDir(testDir).withFileContext(meta).build(); + .withOutputDir(testDir).withFileContext(meta).build(null); String caseName = getName(); MobTestUtil.writeStoreFile(writer, caseName); CachedMobFile cachedMobFile = CachedMobFile.create(fs, writer.getPath(), conf, cacheConf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java index 84a2ba7..bae1e96 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/TestMobFile.java @@ -56,7 +56,7 @@ public class TestMobFile extends TestCase { StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(testDir) .withFileContext(meta) - .build(); + .build(null); String caseName = getName(); MobTestUtil.writeStoreFile(writer, caseName); @@ -109,7 +109,7 @@ public class TestMobFile extends TestCase { StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs) .withOutputDir(testDir) .withFileContext(meta) - .build(); + .build(null); MobTestUtil.writeStoreFile(writer, getName()); MobFile mobFile = new MobFile(new StoreFile(fs, writer.getPath(), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java index 7970d62..e1f9af3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mob/compactions/TestPartitionedMobCompactor.java @@ -303,7 +303,7 @@ public class TestPartitionedMobCompactor { new Date()), mobSuffix); } StoreFileWriter mobFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs) - .withFileContext(meta).withFilePath(new Path(basePath, mobFileName.getFileName())).build(); + .withFileContext(meta).withFilePath(new Path(basePath, mobFileName.getFileName())).build(null); writeStoreFile(mobFileWriter, startRow, Bytes.toBytes(family), Bytes.toBytes(qualifier), type, (i+1)*1000); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java index a311501..d179860 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.java @@ -191,7 +191,7 @@ public class CreateRandomStoreFile { .withBloomType(bloomType) .withMaxKeyCount(numKV) .withFileContext(meta) - .build(); + .build(null); rand = new Random(); LOG.info("Writing " + numKV + " key/value pairs"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index 6fd8dd7..5571001 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -56,6 +56,31 @@ public class MetricsTableWrapperStub 
implements MetricsTableWrapperAggregate { return 3000; } + @Override + public long getBlockCacheSize(String table) { + return 5000; + } + + @Override + public long getBlockCacheCount(String table) { + return 100; + } + + @Override + public long getBlockCacheHitCount(String table) { + return 800; + } + + @Override + public long getBlockCacheMissCount(String table) { + return 200; + } + + @Override + public double getBlockCacheHitPercent(String table) { + return 80.0; + } + public String getTableName() { return tableName; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java index 6ba12a9..5271b0b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBulkLoad.java @@ -277,7 +277,7 @@ public class TestBulkLoad { try { hFileFactory.withOutputStream(out); hFileFactory.withFileContext(new HFileContext()); - HFile.Writer writer = hFileFactory.create(); + HFile.Writer writer = hFileFactory.create(null); try { writer.append(new KeyValue(CellUtil.createCell(randomBytes, family, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java index 4a73eda..10954ea 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java @@ -231,7 +231,7 @@ public class TestCacheOnWriteInSchema { // Flags: don't cache the block, use pread, this is not a compaction. // Also, pass null for expected block type to avoid checking it. 
HFileBlock block = reader.readBlock(offset, -1, false, true, - false, true, null, DataBlockEncoding.NONE); + false, true, null, DataBlockEncoding.NONE, null); BlockCacheKey blockCacheKey = new BlockCacheKey(reader.getName(), offset); boolean isCached = cache.getBlock(blockCacheKey, true, false, true) != null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java index d0c0089..5b8a839 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java @@ -305,7 +305,7 @@ public class TestCompoundBloomFilter { .withOutputDir(TEST_UTIL.getDataTestDir()) .withBloomType(bt) .withFileContext(meta) - .build(); + .build(null); assertTrue(w.hasGeneralBloom()); assertTrue(w.getGeneralBloomWriter() instanceof CompoundBloomFilterWriter); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java index 55f882a..1acbfe4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFSErrorsExposed.java @@ -85,7 +85,7 @@ public class TestFSErrorsExposed { util.getConfiguration(), cacheConf, hfs) .withOutputDir(hfilePath) .withFileContext(meta) - .build(); + .build(null); TestStoreFile.writeStoreFile( writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); @@ -135,7 +135,7 @@ public class TestFSErrorsExposed { util.getConfiguration(), cacheConf, hfs) .withOutputDir(hfilePath) .withFileContext(meta) - .build(); + .build(null); TestStoreFile.writeStoreFile( writer, Bytes.toBytes("cf"), Bytes.toBytes("qual")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index a3804dd..293e288 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -1617,7 +1617,7 @@ public class TestHRegionReplayEvents { try { hFileFactory.withOutputStream(out); hFileFactory.withFileContext(new HFileContext()); - HFile.Writer writer = hFileFactory.create(); + HFile.Writer writer = hFileFactory.create(null); try { writer.append(new KeyValue(CellUtil.createCell(valueBytes, family, valueBytes, 0l, KeyValue.Type.Put.getCode(), valueBytes))); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java index bd5c91e..b6e0fb5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java @@ -148,7 +148,7 @@ public class TestHRegionServerBulkLoad { .getWriterFactory(conf, new CacheConfig(conf)) .withPath(fs, path) .withFileContext(context) - .create(); + .create(null); long now = System.currentTimeMillis(); try { // subtract 2 since iterateOnSplits doesn't include boundary keys diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java index c114298..2ebab48 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableAggregate.java @@ -17,12 +17,26 @@ */ package org.apache.hadoop.hbase.regionserver; +import static org.junit.Assert.assertEquals; import java.io.IOException; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CompatibilityFactory; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MiniHBaseCluster; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.test.MetricsAssertHelper; import org.apache.hadoop.hbase.testclassification.RegionServerTests; import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Threads; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -32,6 +46,41 @@ public class TestMetricsTableAggregate { public static MetricsAssertHelper HELPER = CompatibilityFactory.getInstance(MetricsAssertHelper.class); + private static MiniHBaseCluster cluster; + private static HRegionServer rs; + private static Configuration conf; + private static HBaseTestingUtility TEST_UTIL; + private static MetricsRegionServer metricsRegionServer; + + @BeforeClass + public static void startCluster() throws Exception { + TEST_UTIL = new HBaseTestingUtility(); + conf = TEST_UTIL.getConfiguration(); + conf.getLong("hbase.splitlog.max.resubmit", 0); + // Make the failure test faster + conf.setInt("zookeeper.recovery.retry", 0); + conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1); + + TEST_UTIL.startMiniCluster(1, 1); + cluster = TEST_UTIL.getHBaseCluster(); + + cluster.waitForActiveAndReadyMaster(); + + while (cluster.getLiveRegionServerThreads().size() < 1) { + Threads.sleep(100); + } + + rs = cluster.getRegionServer(0); + metricsRegionServer = rs.getRegionServerMetrics(); + } + + @AfterClass + public static void after() throws Exception { + if (TEST_UTIL != null) { + TEST_UTIL.shutdownMiniCluster(); + } + } + @Test public void testTableWrapperAggregateMetrics() throws IOException { String tableName = "testTableMetrics"; @@ -48,5 +97,36 @@ public class TestMetricsTableAggregate { HELPER.assertGauge("Namespace_default_table_testTableMetrics_metric_memstoreSize", 1000, agg); HELPER.assertGauge("Namespace_default_table_testTableMetrics_metric_storeFileSize", 2000, agg); HELPER.assertGauge("Namespace_default_table_testTableMetrics_metric_tableSize", 3000, agg); + + HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_blockCacheCount", 100, agg); + HELPER.assertGauge("Namespace_default_table_testTableMetrics_metric_blockCacheSize", 5000, agg); + HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_blockCacheHitCount", 800, agg); + HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_blockCacheMissCount", 200, agg); + 
HELPER.assertGauge("Namespace_default_table_testTableMetrics_metric_blockCacheHitPercent", 80, agg); + } + + @Test + public void testBlockCacheTableMetrics() throws IOException { + final TableName TABLENAME1 = TableName.valueOf("usertable"); + final byte[] FAMILY = Bytes.toBytes("family"); + final byte[] COLUMN1 = Bytes.toBytes("c1"); + + try (Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY);) { + // put rows into the first table + for (int i = 0; i < 1; i++) { + Put p = new Put(Bytes.toBytes("row" + i)); + p.addColumn(FAMILY, COLUMN1, COLUMN1); + t1.put(p); + } + TEST_UTIL.flush(TABLENAME1); + Get get = new Get(Bytes.toBytes("row0")); + get.addFamily(FAMILY); + get.addColumn(FAMILY, COLUMN1); + Result result = t1.get(get); + long rsize = result.getColumnLatestCell(FAMILY, COLUMN1).getRowLength(); + assertEquals(rsize, 4); + } finally { + TEST_UTIL.deleteTable(TABLENAME1); + } } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java index 5cbca4b..68f755e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java @@ -316,7 +316,7 @@ public class TestMobStoreCompaction { private void createHFile(Path path, int rowIdx, byte[] dummyData) throws IOException { HFileContext meta = new HFileContextBuilder().build(); HFile.Writer writer = HFile.getWriterFactory(conf, new CacheConfig(conf)).withPath(fs, path) - .withFileContext(meta).create(); + .withFileContext(meta).create(null); long now = System.currentTimeMillis(); try { KeyValue kv = new KeyValue(Bytes.add(STARTROW, Bytes.toBytes(rowIdx)), COLUMN_FAMILY, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java index 0ec859c..10bb780 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestReversibleScanners.java @@ -99,7 +99,7 @@ public class TestReversibleScanners { HFileContext hFileContext = hcBuilder.build(); StoreFileWriter writer = new StoreFileWriter.Builder( TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(hfilePath) - .withFileContext(hFileContext).build(); + .withFileContext(hFileContext).build(null); writeStoreFile(writer); StoreFile sf = new StoreFile(fs, writer.getPath(), @@ -147,10 +147,10 @@ public class TestReversibleScanners { HFileContext hFileContext = hcBuilder.build(); StoreFileWriter writer1 = new StoreFileWriter.Builder( TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir( - hfilePath).withFileContext(hFileContext).build(); + hfilePath).withFileContext(hFileContext).build(null); StoreFileWriter writer2 = new StoreFileWriter.Builder( TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir( - hfilePath).withFileContext(hFileContext).build(); + hfilePath).withFileContext(hFileContext).build(null); MemStore memstore = new DefaultMemStore(); writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1, @@ -237,10 +237,10 @@ public class TestReversibleScanners { HFileContext hFileContext = hcBuilder.build(); StoreFileWriter writer1 = new StoreFileWriter.Builder( TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir( - 
-        hfilePath).withFileContext(hFileContext).build();
+        hfilePath).withFileContext(hFileContext).build(null);
     StoreFileWriter writer2 = new StoreFileWriter.Builder(
         TEST_UTIL.getConfiguration(), cacheConf, fs).withOutputDir(
-        hfilePath).withFileContext(hFileContext).build();
+        hfilePath).withFileContext(hFileContext).build(null);
     MemStore memstore = new DefaultMemStore();
     writeMemstoreAndStoreFiles(memstore, new StoreFileWriter[] { writer1,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
index 623d75b..f54bc7b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestScannerWithBulkload.java
@@ -149,7 +149,7 @@ public class TestScannerWithBulkload {
     HFile.WriterFactory wf = HFile.getWriterFactoryNoCache(TEST_UTIL.getConfiguration());
     Assert.assertNotNull(wf);
     HFileContext context = new HFileContext();
-    HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create();
+    HFile.Writer writer = wf.withPath(fs, path).withFileContext(context).create(null);
     KeyValue kv = new KeyValue(Bytes.toBytes("row1"), Bytes.toBytes("col"), Bytes.toBytes("q"), l,
         Bytes.toBytes("version2"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
index e3e62fc..40960e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java
@@ -439,7 +439,7 @@ public class TestStore {
             fs)
             .withOutputDir(storedir)
             .withFileContext(meta)
-            .build();
+            .build(null);
     w.appendMetadata(seqid + 1, false);
     w.close();
     this.store.close();
@@ -1014,7 +1014,7 @@ public class TestStore {
             fs)
             .withOutputDir(storedir)
             .withFileContext(fileContext)
-            .build();
+            .build(null);
     w.appendMetadata(seqid + 1, false);
     w.close();
     LOG.info("Added store file:" + w.getPath());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index ab0c173..550e9b4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -112,7 +112,7 @@ public class TestStoreFile extends HBaseTestCase {
     StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
             .withFilePath(regionFs.createTempName())
             .withFileContext(meta)
-            .build();
+            .build(null);
     writeStoreFile(writer);
     Path sfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
@@ -165,7 +165,7 @@ public class TestStoreFile extends HBaseTestCase {
     StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
             .withFilePath(regionFs.createTempName())
             .withFileContext(meta)
-            .build();
+            .build(null);
     writeStoreFile(writer);
     Path hsfPath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
@@ -231,7 +231,7 @@ public class TestStoreFile extends HBaseTestCase {
     StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
             .withFilePath(regionFs.createTempName())
             .withFileContext(meta)
-            .build();
+            .build(null);
     writeStoreFile(writer);
     Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
@@ -241,7 +241,7 @@ public class TestStoreFile extends HBaseTestCase {
         HFileLink.createHFileLinkName(hri, storeFilePath.getName()));
     // Try to open store file from link
-    StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath);
+    StoreFileInfo storeFileInfo = new StoreFileInfo(testConf, this.fs, linkFilePath, null);
     StoreFile hsf = new StoreFile(this.fs, storeFileInfo, testConf, cacheConf,
         BloomType.NONE);
     assertTrue(storeFileInfo.isLink());
@@ -276,7 +276,7 @@ public class TestStoreFile extends HBaseTestCase {
     StoreFileWriter writer = new StoreFileWriter.Builder(testConf, cacheConf, this.fs)
             .withFilePath(regionFs.createTempName())
             .withFileContext(meta)
-            .build();
+            .build(null);
     writeStoreFile(writer);
     Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());
@@ -497,7 +497,7 @@ public class TestStoreFile extends HBaseTestCase {
     }
     writer.close();
-    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
+    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf, null);
     reader.loadFileInfo();
     reader.loadBloomfilter();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -550,7 +550,7 @@ public class TestStoreFile extends HBaseTestCase {
             .withBloomType(BloomType.ROW)
             .withMaxKeyCount(2000)
             .withFileContext(meta)
-            .build();
+            .build(null);
     bloomWriteRead(writer, fs);
   }
@@ -573,7 +573,7 @@ public class TestStoreFile extends HBaseTestCase {
             .withFilePath(f)
             .withMaxKeyCount(2000)
             .withFileContext(meta)
-            .build();
+            .build(null);
     // add delete family
     long now = System.currentTimeMillis();
@@ -585,7 +585,7 @@ public class TestStoreFile extends HBaseTestCase {
     }
     writer.close();
-    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
+    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf, null);
     reader.loadFileInfo();
     reader.loadBloomfilter();
@@ -626,12 +626,12 @@ public class TestStoreFile extends HBaseTestCase {
     StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
             .withFilePath(f)
             .withFileContext(meta)
-            .build();
+            .build(null);
     writeStoreFile(writer);
     writer.close();
-    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
+    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf, null);
     // Now do reseek with empty KV to position to the beginning of the file
@@ -674,7 +674,7 @@ public class TestStoreFile extends HBaseTestCase {
             .withBloomType(bt[x])
             .withMaxKeyCount(expKeys[x])
             .withFileContext(meta)
-            .build();
+            .build(null);
     long now = System.currentTimeMillis();
     for (int i = 0; i < rowCount*2; i += 2) { // rows
@@ -691,7 +691,7 @@ public class TestStoreFile extends HBaseTestCase {
     }
     writer.close();
-    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
+    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf, null);
     reader.loadFileInfo();
     reader.loadBloomfilter();
     StoreFileScanner scanner = reader.getStoreFileScanner(false, false);
@@ -828,7 +828,7 @@ public class TestStoreFile extends HBaseTestCase {
     StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
             .withOutputDir(dir)
             .withFileContext(meta)
-            .build();
+            .build(null);
     List kvList = getKeyValueSet(timestamps,numRows, qualifier, family);
@@ -1034,7 +1034,7 @@ public class TestStoreFile extends HBaseTestCase {
             .withFilePath(path)
             .withMaxKeyCount(2000)
             .withFileContext(meta)
-            .build();
+            .build(null);
     // We'll write N-1 KVs to ensure we don't write an extra block
     kvs.remove(kvs.size()-1);
     for (KeyValue kv : kvs) {
@@ -1071,7 +1071,7 @@ public class TestStoreFile extends HBaseTestCase {
             .withFilePath(path)
             .withMaxKeyCount(2000)
             .withFileContext(meta)
-            .build();
+            .build(null);
     writer.close();
     StoreFile storeFile = new StoreFile(fs, writer.getPath(), conf,
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
index 349ec1c..55d2b83 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileInfo.java
@@ -76,9 +76,9 @@ public class TestStoreFileInfo {
         new Path(mob, "f1"), new Path(archive, "f1"));
     StoreFileInfo info1 = new StoreFileInfo(TEST_UTIL.getConfiguration(),
-      TEST_UTIL.getTestFileSystem(), null, link1);
+      TEST_UTIL.getTestFileSystem(), null, link1, null);
     StoreFileInfo info2 = new StoreFileInfo(TEST_UTIL.getConfiguration(),
-      TEST_UTIL.getTestFileSystem(), null, link2);
+      TEST_UTIL.getTestFileSystem(), null, link2, null);
     assertEquals(info1, info2);
     assertEquals(info1.hashCode(), info2.hashCode());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
index 577940b..1ca919f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFileScannerWithTagCompression.java
@@ -69,12 +69,12 @@ public class TestStoreFileScannerWithTagCompression {
         .withCompressTags(true).withDataBlockEncoding(DataBlockEncoding.PREFIX).build();
     // Make a store file and write data to it.
     StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs).withFilePath(f)
-        .withFileContext(meta).build();
+        .withFileContext(meta).build(null);
     writeStoreFile(writer);
     writer.close();
-    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf);
+    StoreFileReader reader = new StoreFileReader(fs, f, cacheConf, conf, null);
     StoreFileScanner s = reader.getStoreFileScanner(false, false);
     try {
       // Now do reseek with empty KV to position to the beginning of the file
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index f58e24e..a802314 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -1074,7 +1074,7 @@ public class TestAccessController extends SecureTestUtil {
       writer = HFile.getWriterFactory(conf, new CacheConfig(conf))
          .withPath(fs, path)
          .withFileContext(context)
-         .create();
+         .create(null);
       // subtract 2 since numRows doesn't include boundary keys
       for (byte[] key : Bytes.iterateOnSplits(startKey, endKey, true, numRows-2)) {
         KeyValue kv = new KeyValue(key, family, qualifier, now, key);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
index fbd79c3..e527c89 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileTestUtil.java
@@ -110,7 +110,7 @@ public class HFileTestUtil {
     HFile.Writer writer = HFile.getWriterFactory(configuration, new CacheConfig(configuration))
         .withPath(fs, path)
         .withFileContext(meta)
-        .create();
+        .create(null);
     long now = System.currentTimeMillis();
     try {
       // subtract 2 since iterateOnSplits doesn't include boundary keys
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
index a9b38ba..6a1349c 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/HBaseContext.scala
@@ -902,7 +902,7 @@ class HBaseContext(@transient sc: SparkContext,
       .withBloomType(BloomType.valueOf(familyOptions.bloomType))
       .withComparator(CellComparator.COMPARATOR).withFileContext(hFileContext)
       .withFilePath(new Path(familydir, "_" + UUID.randomUUID.toString.replaceAll("-", "")))
-      .withFavoredNodes(favoredNodes).build())
+      .withFavoredNodes(favoredNodes).build(null))
   }