From 16bae3482e6ed4ebc81df1158f91ae35aa36cae7 Mon Sep 17 00:00:00 2001 From: stack Date: Wed, 13 Apr 2016 21:49:28 -0700 Subject: [PATCH 1/3] HBASE-15650 Remove TimeRangeTracker as point of contention when many threads reading a StoreFile Refactor so we use the immutable, unsynchronized TimeRange when doing time-based checks at read time rather than the heavily synchronized TimeRangeTracker; let TimeRangeTracker be for write-time only. M hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java Make allTime final. Add an includesTimeRange method copied from TimeRangeTracker. M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java Change the names of a few methods so they match the TimeRange methods that do the same thing. Add utility methods (getTimeRangeTracker, getTimeRange, toTimeRange). M hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java Change Reader to use TimeRange-based checks instead of TimeRangeTracker. --- .../java/org/apache/hadoop/hbase/io/TimeRange.java | 25 +++++++---- .../hadoop/hbase/io/hfile/HFilePrettyPrinter.java | 3 +- .../hadoop/hbase/regionserver/DefaultMemStore.java | 7 ++-- .../hadoop/hbase/regionserver/StoreFile.java | 48 ++++++++-------------- .../hbase/regionserver/TimeRangeTracker.java | 48 ++++++++++++++++++---- .../hbase/mapreduce/TestHFileOutputFormat.java | 7 ++-- .../hbase/mapreduce/TestHFileOutputFormat2.java | 8 ++-- .../hadoop/hbase/regionserver/MockStoreFile.java | 8 ++-- .../hbase/regionserver/TestTimeRangeTracker.java | 12 +++--- 9 files changed, 95 insertions(+), 71 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java index 4ec062d..a5f665c 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java @@ -40,7 +40,7 @@ public class TimeRange { private static final long MAX_TIME = Long.MAX_VALUE; private long minStamp = MIN_TIME; private long maxStamp = MAX_TIME; - private boolean allTime = false; + private final boolean allTime; /** * Default constructor. @@ -60,9 +60,7 @@ public class TimeRange { @Deprecated public TimeRange(long minStamp) { this.minStamp = minStamp; - if (this.minStamp == MIN_TIME){ - this.allTime = true; - } + this.allTime = this.minStamp == MIN_TIME; } /** @@ -73,6 +71,7 @@ public class TimeRange { @Deprecated public TimeRange(byte [] minStamp) { this.minStamp = Bytes.toLong(minStamp); + this.allTime = false; } /** @@ -94,9 +93,7 @@ } this.minStamp = minStamp; this.maxStamp = maxStamp; - if (this.minStamp == MIN_TIME && this.maxStamp == MAX_TIME){ - this.allTime = true; - } + this.allTime = this.minStamp == MIN_TIME && this.maxStamp == MAX_TIME; } /** @@ -157,12 +154,24 @@ * @return true if within TimeRange, false if not */ public boolean withinTimeRange(long timestamp) { - if(allTime) return true; + if (allTime) return true; // check if >= minStamp return (minStamp <= timestamp && timestamp < maxStamp); } /** + * Check if this range has any overlap with the passed TimeRange + * @param tr TimeRange + * @return True if there is overlap, false otherwise + */ + // This method came from TimeRangeTracker. We used to go there for this function but better + // to come here to the immutable, unsynchronized data structure at read time.
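+ // For example, given this check, [5,10) overlaps [0,6) and [9,20) but not [0,5); note the
+ // comparison treats this range's max as inclusive, as TimeRangeTracker's version did.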
+ public boolean includesTimeRange(final TimeRange tr) { + if (this.allTime) return true; + return getMin() < tr.getMax() && getMax() >= tr.getMin(); + } + + /** * Check if the specified timestamp is within this TimeRange. *

* Returns true if within interval [minStamp, maxStamp), false diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java index f083f8d..a4dce65 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java @@ -401,8 +401,7 @@ public class HFilePrettyPrinter extends Configured implements Tool { } else if (Bytes.compareTo(e.getKey(), Bytes.toBytes("TIMERANGE")) == 0) { TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); Writables.copyWritable(e.getValue(), timeRangeTracker); - System.out.println(timeRangeTracker.getMinimumTimestamp() + "...." - + timeRangeTracker.getMaximumTimestamp()); + System.out.println(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax()); } else if (Bytes.compareTo(e.getKey(), FileInfo.AVG_KEY_LEN) == 0 || Bytes.compareTo(e.getKey(), FileInfo.AVG_VALUE_LEN) == 0) { System.out.println(Bytes.toInt(e.getValue())); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index a3d4dfd..fe6d550 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -678,10 +678,9 @@ public class DefaultMemStore implements MemStore { timeRange = scan.getTimeRange(); } return (timeRangeTracker.includesTimeRange(timeRange) || - snapshotTimeRangeTracker.includesTimeRange(timeRange)) - && (Math.max(timeRangeTracker.getMaximumTimestamp(), - snapshotTimeRangeTracker.getMaximumTimestamp()) >= - oldestUnexpiredTS); + snapshotTimeRangeTracker.includesTimeRange(timeRange)) && + (Math.max(timeRangeTracker.getMax(), snapshotTimeRangeTracker.getMax()) >= + oldestUnexpiredTS); } /* diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 8ffdfdd..d563672 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -18,12 +18,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Function; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Ordering; - import java.io.DataInput; import java.io.IOException; import java.net.InetSocketAddress; @@ -66,9 +60,14 @@ import org.apache.hadoop.hbase.util.BloomFilter; import org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.BloomFilterWriter; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.io.WritableUtils; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Function; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Ordering; + /** * A Store data file. Stores usually have one or more of these files. They * are produced by flushing the memstore to disk. 
To @@ -503,15 +502,11 @@ public class StoreFile { reader.loadBloomfilter(BlockType.DELETE_FAMILY_BLOOM_META); try { - byte [] timerangeBytes = metadataMap.get(TIMERANGE_KEY); - if (timerangeBytes != null) { - this.reader.timeRangeTracker = new TimeRangeTracker(); - Writables.copyWritable(timerangeBytes, this.reader.timeRangeTracker); - } + this.reader.timeRange = TimeRangeTracker.getTimeRange(metadataMap.get(TIMERANGE_KEY)); } catch (IllegalArgumentException e) { LOG.error("Error reading timestamp range data from meta -- " + "proceeding without", e); - this.reader.timeRangeTracker = null; + this.reader.timeRange = null; } // initialize so we can reuse them after reader closed. firstKey = reader.getFirstKey(); @@ -535,7 +530,7 @@ } catch (IOException e) { try { boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + cacheConf != null? cacheConf.shouldEvictOnClose(): true; this.closeReader(evictOnClose); } catch (IOException ee) { } @@ -581,7 +576,7 @@ */ public void deleteReader() throws IOException { boolean evictOnClose = - cacheConf != null? cacheConf.shouldEvictOnClose(): true; + cacheConf != null? cacheConf.shouldEvictOnClose(): true; closeReader(evictOnClose); this.fs.delete(getPath(), true); } @@ -743,15 +738,11 @@ } public Long getMinimumTimestamp() { - return (getReader().timeRangeTracker == null) ? - null : - getReader().timeRangeTracker.getMinimumTimestamp(); + return getReader().timeRange == null? null: getReader().timeRange.getMin(); } public Long getMaximumTimestamp() { - return (getReader().timeRangeTracker == null) ? - null : - getReader().timeRangeTracker.getMaximumTimestamp(); + return getReader().timeRange == null? null: getReader().timeRange.getMax(); } @@ -1108,12 +1099,11 @@ */ public static class Reader { private static final Log LOG = LogFactory.getLog(Reader.class.getName()); - protected BloomFilter generalBloomFilter = null; protected BloomFilter deleteFamilyBloomFilter = null; protected BloomType bloomFilterType; private final HFile.Reader reader; - protected TimeRangeTracker timeRangeTracker = null; + protected TimeRange timeRange = null; protected long sequenceID = -1; private byte[] lastBloomKey; private long deleteFamilyCnt = -1; @@ -1207,7 +1197,7 @@ public boolean isReferencedInReads() { return refCount.get() != 0; } - + /** * @return true if the file is compacted */ @@ -1261,12 +1251,8 @@ * @return false if queried keys definitely don't exist in this StoreFile */ boolean passesTimerangeFilter(TimeRange timeRange, long oldestUnexpiredTS) { - if (timeRangeTracker == null) { - return true; - } else { - return timeRangeTracker.includesTimeRange(timeRange) && - timeRangeTracker.getMaximumTimestamp() >= oldestUnexpiredTS; - } + return this.timeRange == null? true: + this.timeRange.includesTimeRange(timeRange) && this.timeRange.getMax() >= oldestUnexpiredTS; } /** @@ -1666,7 +1652,7 @@ } public long getMaxTimestamp() { - return timeRangeTracker == null ? Long.MAX_VALUE : timeRangeTracker.getMaximumTimestamp(); + return timeRange == null ?
Long.MAX_VALUE : timeRange.getMax(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java index beadde6..882cef1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/TimeRangeTracker.java @@ -26,14 +26,16 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.io.TimeRange; +import org.apache.hadoop.hbase.util.Writables; import org.apache.hadoop.io.Writable; /** - * Stores the minimum and maximum timestamp values (both are inclusive). - * Can be used to find if any given time range overlaps with its time range - * MemStores use this class to track its minimum and maximum timestamps. + * Stores minimum and maximum timestamp values. Both timestamps are inclusive. + * Use this class at write-time ONLY. Too much synchronization to use at read time. + * Use {@link TimeRange} at read time. See toTimeRange() to make a TimeRange to use. + * MemStores use this class to track minimum and maximum timestamps. * When writing StoreFiles, this information is stored in meta blocks and used - * at read time to match against the required TimeRange. + * at read time via an instance of {@link TimeRange} to test if Cells fit the StoreFile TimeRange. */ @InterfaceAudience.Private public class TimeRangeTracker implements Writable { @@ -52,7 +54,7 @@ public class TimeRangeTracker implements Writable { * @param trt source TimeRangeTracker */ public TimeRangeTracker(final TimeRangeTracker trt) { - set(trt.getMinimumTimestamp(), trt.getMaximumTimestamp()); + set(trt.getMin(), trt.getMax()); } public TimeRangeTracker(long minimumTimestamp, long maximumTimestamp) { @@ -75,7 +77,7 @@ public class TimeRangeTracker implements Writable { } /** - * Update the current TimestampRange to include the timestamp from Cell + * Update the current TimestampRange to include the timestamp from the cell. * If the Key is of type DeleteColumn or DeleteFamily, it includes the * entire time range from 0 to timestamp of the key. * @param cell the Cell to include @@ -128,14 +130,14 @@ public class TimeRangeTracker implements Writable { /** * @return the minimumTimestamp */ - public synchronized long getMinimumTimestamp() { + public synchronized long getMin() { return minimumTimestamp; } /** * @return the maximumTimestamp */ - public synchronized long getMaximumTimestamp() { + public synchronized long getMax() { return maximumTimestamp; } @@ -153,4 +155,34 @@ public class TimeRangeTracker implements Writable { public synchronized String toString() { return "[" + minimumTimestamp + "," + maximumTimestamp + "]"; } + + /** + * @return An instance of TimeRangeTracker filled w/ the content of serialized + * TimeRangeTracker in timeRangeTrackerBytes. + * @throws IOException + */ + public static TimeRangeTracker getTimeRangeTracker(final byte [] timeRangeTrackerBytes) + throws IOException { + if (timeRangeTrackerBytes == null) return null; + TimeRangeTracker trt = new TimeRangeTracker(); + Writables.copyWritable(timeRangeTrackerBytes, trt); + return trt; + } + + /** + * @return An instance of a TimeRange made from the serialized TimeRangeTracker passed in + * timeRangeTrackerBytes.
+ * @throws IOException + */ + static TimeRange getTimeRange(final byte [] timeRangeTrackerBytes) throws IOException { + TimeRangeTracker trt = getTimeRangeTracker(timeRangeTrackerBytes); + return trt == null? null: trt.toTimeRange(); + } + + /** + * @return Make a TimeRange from current state of this. + */ + TimeRange toTimeRange() throws IOException { + return new TimeRange(getMin(), getMax()); + } } \ No newline at end of file diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java index 4c8bdc2..a6eef1b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java @@ -286,10 +286,9 @@ public class TestHFileOutputFormat { // unmarshall and check values. TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); Writables.copyWritable(range, timeRangeTracker); - LOG.info(timeRangeTracker.getMinimumTimestamp() + - "...." + timeRangeTracker.getMaximumTimestamp()); - assertEquals(1000, timeRangeTracker.getMinimumTimestamp()); - assertEquals(2000, timeRangeTracker.getMaximumTimestamp()); + LOG.info(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax()); + assertEquals(1000, timeRangeTracker.getMin()); + assertEquals(2000, timeRangeTracker.getMax()); rd.close(); } finally { if (writer != null && context != null) writer.close(context); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 17d5df5..757e938 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -293,10 +293,10 @@ public class TestHFileOutputFormat2 { // unmarshall and check values. TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); Writables.copyWritable(range, timeRangeTracker); - LOG.info(timeRangeTracker.getMinimumTimestamp() + - "...." + timeRangeTracker.getMaximumTimestamp()); - assertEquals(1000, timeRangeTracker.getMinimumTimestamp()); - assertEquals(2000, timeRangeTracker.getMaximumTimestamp()); + LOG.info(timeRangeTracker.getMin() + + "...." + timeRangeTracker.getMax()); + assertEquals(1000, timeRangeTracker.getMin()); + assertEquals(2000, timeRangeTracker.getMax()); rd.close(); } finally { if (writer != null && context != null) writer.close(context); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java index 9663426..70623e9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockStoreFile.java @@ -112,12 +112,12 @@ public class MockStoreFile extends StoreFile { public Long getMinimumTimestamp() { return (timeRangeTracker == null) ? - null : timeRangeTracker.getMinimumTimestamp(); + null : timeRangeTracker.getMin(); } public Long getMaximumTimestamp() { return (timeRangeTracker == null) ? 
- null : timeRangeTracker.getMaximumTimestamp(); + null : timeRangeTracker.getMax(); } @Override @@ -133,7 +133,7 @@ public class MockStoreFile extends StoreFile { @Override public StoreFile.Reader getReader() { final long len = this.length; - final TimeRangeTracker timeRange = this.timeRangeTracker; + final TimeRangeTracker timeRangeTracker = this.timeRangeTracker; final long entries = this.entryCount; return new StoreFile.Reader() { @Override @@ -143,7 +143,7 @@ @Override public long getMaxTimestamp() { - return timeRange == null ? Long.MAX_VALUE : timeRange.maximumTimestamp; + return timeRangeTracker == null? Long.MAX_VALUE: timeRangeTracker.getMax(); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimeRangeTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimeRangeTracker.java index 963a9e4..815622a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimeRangeTracker.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestTimeRangeTracker.java @@ -36,8 +36,8 @@ public class TestTimeRangeTracker { trr.includeTimestamp(3); trr.includeTimestamp(2); trr.includeTimestamp(1); - assertTrue(trr.getMinimumTimestamp() != TimeRangeTracker.INITIAL_MINIMUM_TIMESTAMP); - assertTrue(trr.getMaximumTimestamp() != -1 /*The initial max value*/); + assertTrue(trr.getMin() != TimeRangeTracker.INITIAL_MINIMUM_TIMESTAMP); + assertTrue(trr.getMax() != -1 /*The initial max value*/); } @Test @@ -80,8 +80,8 @@ public class TestTimeRangeTracker { for (int i = 0; i < threads.length; i++) { threads[i].join(); } - assertTrue(trr.getMaximumTimestamp() == calls * threadCount); - assertTrue(trr.getMinimumTimestamp() == 0); + assertTrue(trr.getMax() == calls * threadCount); + assertTrue(trr.getMin() == 0); } @Test @@ -141,7 +141,7 @@ public class TestTimeRangeTracker { for (int i = 0; i < threads.length; i++) { threads[i].join(); } - System.out.println(trr.getMinimumTimestamp() + " " + trr.getMaximumTimestamp() + " " + + System.out.println(trr.getMin() + " " + trr.getMax() + " " + (System.currentTimeMillis() - start)); } -} +} \ No newline at end of file -- 2.6.1 From 1e34a0e56ad078855928ee06809af74cd715420b Mon Sep 17 00:00:00 2001 From: Apekshit Date: Tue, 19 Apr 2016 16:59:19 -0700 Subject: [PATCH 2/3] HBASE-15673 Fix latency metrics for multiGet. - Also fixes some stuff in help text. (Apekshit) Change-Id: I95cfa2be74605efab0368417d59c50b94af0facb Signed-off-by: stack --- .../org/apache/hadoop/hbase/PerformanceEvaluation.java | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index a017d33..7dced80 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -1108,7 +1108,12 @@ public class PerformanceEvaluation extends Configured implements Tool { } finally { scope.close(); } - latencyHistogram.update((System.nanoTime() - startTime) / 1000); + // If multiget is enabled, say set to 10, testRow() returns immediately the first 9 times + // and sends the actual get request in the 10th iteration. We should only set latency + // when the actual request is sent because otherwise it turns out to be 0.
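+ // For example, with multiGet=10 and startRow=0, the histogram is updated only at
+ // i = 9, 19, 29, ..., and each recorded latency then covers a whole 10-row batch.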
+ if (opts.multiGet == 0 || (i - startRow + 1) % opts.multiGet == 0) { + latencyHistogram.update((System.nanoTime() - startTime) / 1000); + } if (status != null && i > 0 && (i % getReportingPeriod()) == 0) { status.setStatus(generateStatus(startRow, i, lastRow)); } @@ -1794,20 +1799,23 @@ public class PerformanceEvaluation extends Configured implements Tool { System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); System.err.println(" autoFlush Set autoFlush on htable. Default: False"); System.err.println(" oneCon all the threads share the same connection. Default: False"); - System.err.println(" presplit Create presplit table. Recommended for accurate perf " + - "analysis (see guide). Default: disabled"); + System.err.println(" presplit Create presplit table. If a table with same name exists," + + " it'll be deleted and recreated (instead of verifying count of its existing regions). " + + "Recommended for accurate perf analysis (see guide). Default: disabled"); System.err.println(" inmemory Tries to keep the HFiles of the CF " + "inmemory as far as possible. Not guaranteed that reads are always served " + "from memory. Default: false"); System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + "Default: false"); System.err.println(" numoftags Specify the no of tags that would be needed. " + - "This works only if usetags is true."); + "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); System.err.println(" filterAll Helps to filter out all the rows on the server side" + " there by not returning any thing back to the client. Helps to check the server side" + " performance. Uses FilterAllFilter internally. "); System.err.println(" latency Set to report operation latencies. Default: False"); System.err.println(" bloomFilter Bloom filter type, one of " + Arrays.toString(BloomType.values())); + System.err.println(" blockEncoding Block encoding to use. Value should be one of " + + Arrays.toString(DataBlockEncoding.values()) + ". Default: NONE"); System.err.println(" valueSize Pass value size to use: Default: " + DEFAULT_OPTS.getValueSize()); System.err.println(" valueRandom Set if we should vary value size between 0 and " + -- 2.6.1 From 143bf65f2e9ba33f57a78a69f9003ac19dc8460e Mon Sep 17 00:00:00 2001 From: stack Date: Wed, 20 Apr 2016 17:22:52 -0700 Subject: [PATCH 3/3] HBASE-15366 Add doc, trace-level logging, and test around No change in operation, just adding doc., and some helpful logging M hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java Make it emit its toString in format that matches the way we log elsewhere M hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java Capitalize statics. M hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java Verify and cleanup documentation of hfileblock format at head of class. Explain what 'EXTRA_SERIALIZATION_SPACE' is all about. Connect how we serialize and deserialize... done in different places and one way when pulling from HDFS and another when pulling from cache (TO BE FIXED). Shut down a load of public access. 
M hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java Add trace-level logging --- .../java/org/apache/hadoop/hbase/HConstants.java | 4 +- .../io/encoding/BufferedDataBlockEncoder.java | 8 +- .../hadoop/hbase/io/encoding/DataBlockEncoder.java | 3 +- .../apache/hadoop/hbase/io/hfile/CacheConfig.java | 10 +- .../org/apache/hadoop/hbase/io/hfile/HFile.java | 11 +- .../apache/hadoop/hbase/io/hfile/HFileBlock.java | 351 ++++++++++++--------- .../hadoop/hbase/io/hfile/HFileReaderV2.java | 4 +- .../hbase/io/hfile/bucket/BucketAllocator.java | 17 +- .../hadoop/hbase/io/hfile/bucket/BucketCache.java | 35 +- .../filter/TestFilterListOrOperatorWithBlkCnt.java | 2 +- .../hbase/io/encoding/TestDataBlockEncoders.java | 25 +- .../io/encoding/TestSeekToBlockWithEncoders.java | 4 +- .../io/hfile/TestForceCacheImportantBlocks.java | 6 +- .../hadoop/hbase/io/hfile/TestHFileBlock.java | 2 +- .../io/hfile/TestHFileBlockCompatibility.java | 3 +- .../hadoop/hbase/regionserver/TestBlocksRead.java | 2 +- 16 files changed, 284 insertions(+), 203 deletions(-) diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java index 5f20183..7a77c7e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -64,7 +64,9 @@ public final class HConstants { public static final byte[] RPC_HEADER = new byte[] { 'H', 'B', 'a', 's' }; public static final byte RPC_CURRENT_VERSION = 0; - // HFileBlock constants. + // HFileBlock constants. TODO!!!! THESE DEFINES BELONG IN HFILEBLOCK, NOT UP HERE. + // Needed down in hbase-common though by encoders but these encoders should not be dealing + // in the internals of hfileblocks. Fix encapsulation. /** The size data structures with minor version is 0 */ public static final int HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM = MAGIC_LENGTH + 2 * Bytes.SIZEOF_INT diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java index 74e0567..d808f88 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java @@ -47,7 +47,9 @@ import org.apache.hadoop.io.WritableUtils; */ @InterfaceAudience.Private abstract class BufferedDataBlockEncoder implements DataBlockEncoder { - + /** + * TODO: This datablockencoder is dealing in internals of hfileblocks. 
Purge reference to HFBs + */ private static int INITIAL_KEY_BUFFER_SIZE = 512; @Override @@ -967,8 +969,8 @@ abstract class BufferedDataBlockEncoder implements DataBlockEncoder { BufferedDataBlockEncodingState state = (BufferedDataBlockEncodingState) encodingCtx .getEncodingState(); // Write the unencodedDataSizeWritten (with header size) - Bytes.putInt(uncompressedBytesWithHeader, HConstants.HFILEBLOCK_HEADER_SIZE - + DataBlockEncoding.ID_SIZE, state.unencodedDataSizeWritten + Bytes.putInt(uncompressedBytesWithHeader, + HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE, state.unencodedDataSizeWritten ); if (encodingCtx.getDataBlockEncoding() != DataBlockEncoding.NONE) { encodingCtx.postEncoding(BlockType.ENCODED_DATA); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java index a9f693b..5452ae5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/DataBlockEncoder.java @@ -38,7 +38,8 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext; */ @InterfaceAudience.Private public interface DataBlockEncoder { - +// TODO: This Interface should be deprecated and replaced. It presumes hfile and carnal knowledge of +// Cell internals. It was done for a different time. Remove. Purge. /** * Starts encoding for a block of KeyValues. Call * {@link #endBlockEncoding(HFileBlockEncodingContext, DataOutputStream, byte[])} to finish diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java index 2b76b88..9b99804 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java @@ -82,8 +82,14 @@ public class CacheConfig { */ /** - * If the chosen ioengine can persist its state across restarts, the path to the file to - * persist to. + * If the chosen ioengine can persist its state across restarts, the path to the file to persist + * to. This file is NOT the data file. It is a file into which we will serialize the map of + * what is in the data file. For example, if you pass the following argument as + * BUCKET_CACHE_IOENGINE_KEY ("hbase.bucketcache.ioengine"), + * file:/tmp/bucketcache.data , then we will write the bucketcache data to the file + * /tmp/bucketcache.data but the metadata on where the data is in the supplied file + * is an in-memory map that needs to be persisted across restarts. Where to store this + * in-memory state is what you supply here: e.g. /tmp/bucketcache.map. */ public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY = "hbase.bucketcache.persistent.path"; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 3fb9554..3ee120f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -178,19 +178,20 @@ public class HFile { * The number of bytes per checksum. 
*/ public static final int DEFAULT_BYTES_PER_CHECKSUM = 16 * 1024; + // For measuring number of checksum failures - static final Counter checksumFailures = new Counter(); + static final Counter CHECKSUM_FAILURES = new Counter(); - // for test purpose - public static final Counter dataBlockReadCnt = new Counter(); + // For tests. Gets incremented when we read a block whether from HDFS or from Cache. + public static final Counter DATABLOCK_READ_COUNT = new Counter(); /** * Number of checksum verification failures. It also * clears the counter. */ public static final long getChecksumFailuresCount() { - long count = checksumFailures.get(); - checksumFailures.set(0); + long count = CHECKSUM_FAILURES.get(); + CHECKSUM_FAILURES.set(0); return count; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java index 6cd7b20..af3dd49 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java @@ -27,6 +27,8 @@ import java.nio.ByteBuffer; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -51,78 +53,112 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; /** - * Reading {@link HFile} version 1 and 2 blocks, and writing version 2 blocks. - *

+ *

Be aware that when we read from HDFS, we overread pulling in the next blocks' header too. + * We do this to save having to do two seeks to read an HFileBlock; a seek to read the header + * to figure lengths, etc., and then another seek to pull in the data. */ @InterfaceAudience.Private @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="HE_EQUALS_USE_HASHCODE", justification="Fix!!! Fine for now bug FIXXXXXXX!!!!") public class HFileBlock implements Cacheable { + private static final Log LOG = LogFactory.getLog(HFileBlock.class); /** - * On a checksum failure on a Reader, these many suceeding read - * requests switch back to using hdfs checksums before auto-reenabling - * hbase checksum verification. + * On a checksum failure, do these many succeeding read requests using hdfs checksums before + * auto-reenabling hbase checksum verification. */ static final int CHECKSUM_VERIFICATION_NUM_IO_THRESHOLD = 3; + private static int UNSET = -1; public static final boolean FILL_HEADER = true; public static final boolean DONT_FILL_HEADER = false; - /** - * The size of block header when blockType is {@link BlockType#ENCODED_DATA}. - * This extends normal header by adding the id of encoder. - */ - public static final int ENCODED_HEADER_SIZE = HConstants.HFILEBLOCK_HEADER_SIZE - + DataBlockEncoding.ID_SIZE; - - static final byte[] DUMMY_HEADER_NO_CHECKSUM = - new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM]; - public static final int BYTE_BUFFER_HEAP_SIZE = (int) ClassSize.estimateBase( ByteBuffer.wrap(new byte[0], 0, 0).getClass(), false); - // meta.usesHBaseChecksum+offset+nextBlockOnDiskSizeWithHeader - public static final int EXTRA_SERIALIZATION_SPACE = Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT - + Bytes.SIZEOF_LONG; + /** + * See #blockDeserializer method for more info. + * 13 bytes of extra stuff stuck on the end of the HFileBlock that we pull in from HDFS (note, + * when we read from HDFS, we pull in an HFileBlock AND the header of the next block if one). + * The 13 bytes are: usesHBaseChecksum (1 byte) + offset of this block (long) + + * nextBlockOnDiskSizeWithHeader (int). + */ + public static final int EXTRA_SERIALIZATION_SPACE = + Bytes.SIZEOF_BYTE + Bytes.SIZEOF_INT + Bytes.SIZEOF_LONG; /** * Each checksum value is an integer that can be stored in 4 bytes. */ static final int CHECKSUM_SIZE = Bytes.SIZEOF_INT; + static final byte[] DUMMY_HEADER_NO_CHECKSUM = + new byte[HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM]; + + /** + * Used deserializing blocks from Cache. + * + * Serializing to cache is a little hard to follow. See Writer#finishBlock for where it is done. + * When we start to append to a new HFileBlock, + * we skip over where the header should go before we start adding Cells. When the block is + * done, we'll then go back and fill in the header and the checksum tail. Be aware that what + * gets serialized into the blockcache is a byte array that contains an HFileBlock followed by + * its checksums and then the header of the next HFileBlock (needed to help navigate), followed + * again by an extra 13 bytes of meta info needed when time to recreate the HFileBlock from cache. + * + * ++++++++++++++ + * + HFileBlock + + * ++++++++++++++ + * + Checksums + + * ++++++++++++++ + * + NextHeader + + * ++++++++++++++ + * + ExtraMeta! + + * ++++++++++++++ + * + * TODO: Fix it so we do NOT put the NextHeader into blockcache. It is not necessary. 
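+ *
+ * Concretely, with HBase checksums in use (33-byte headers), the cached bytes for a block
+ * line up as: [33-byte header][block data][checksums][33-byte next block header]
+ * [1-byte usesHBaseChecksum][8-byte offset][4-byte nextBlockOnDiskSizeWithHeader].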
+ */ static final CacheableDeserializer blockDeserializer = new CacheableDeserializer() { public HFileBlock deserialize(ByteBuffer buf, boolean reuse) throws IOException{ + // Rewind to just before the EXTRA_SERIALIZATION_SPACE. buf.limit(buf.limit() - HFileBlock.EXTRA_SERIALIZATION_SPACE).rewind(); + // Get a new buffer to pass the deserialized HFileBlock for it to 'own'. ByteBuffer newByteBuffer; if (reuse) { newByteBuffer = buf.slice(); @@ -130,6 +166,7 @@ public class HFileBlock implements Cacheable { newByteBuffer = ByteBuffer.allocate(buf.limit()); newByteBuffer.put(buf); } + // Read out the EXTRA_SERIALIZATION_SPACE content and shove into our HFileBlock. buf.position(buf.limit()); buf.limit(buf.limit() + HFileBlock.EXTRA_SERIALIZATION_SPACE); boolean usesChecksum = buf.get() == (byte)1; @@ -161,18 +198,28 @@ public class HFileBlock implements Cacheable { /** Type of block. Header field 0. */ private BlockType blockType; - /** Size on disk excluding header, including checksum. Header field 1. */ + /** + * Size on disk excluding header, including checksum. Header field 1. + * @see Writer#putHeader(byte[], int, int, int, int) + */ private int onDiskSizeWithoutHeader; - /** Size of pure data. Does not include header or checksums. Header field 2. */ + /** + * Size of pure data. Does not include header or checksums. Header field 2. + * @see Writer#putHeader(byte[], int, int, int, int) + */ private final int uncompressedSizeWithoutHeader; - /** The offset of the previous block on disk. Header field 3. */ + /** + * The offset of the previous block on disk. Header field 3. + * @see Writer#putHeader(byte[], int, int, int, int) + */ private final long prevBlockOffset; /** * Size on disk of header + data. Excludes checksum. Header field 6, * OR calculated from {@link #onDiskSizeWithoutHeader} when using HDFS checksum. + * @see Writer#putHeader(byte[], int, int, int, int) */ private final int onDiskDataSizeWithHeader; @@ -186,7 +233,7 @@ public class HFileBlock implements Cacheable { * The offset of this block in the file. Populated by the reader for * convenience of access. This offset is not part of the block header. */ - private long offset = -1; + private long offset = UNSET; /** * The on-disk size of the next block, including the header, obtained by @@ -197,7 +244,7 @@ public class HFileBlock implements Cacheable { /** * Creates a new {@link HFile} block from the given fields. This constructor - * is mostly used when the block data has already been read and uncompressed, + * is used when the block data has already been read and uncompressed, * and is sitting in a byte buffer. * * @param blockType the type of this block, see {@link BlockType} @@ -205,8 +252,8 @@ public class HFileBlock implements Cacheable { * @param uncompressedSizeWithoutHeader see {@link #uncompressedSizeWithoutHeader} * @param prevBlockOffset see {@link #prevBlockOffset} * @param buf block header ({@link HConstants#HFILEBLOCK_HEADER_SIZE} bytes) followed by - * uncompressed data. This - * @param fillHeader when true, parse {@code buf} and override the first 4 header fields. + * uncompressed data. + * @param fillHeader when true, write the first 4 header fields into passed buffer. 
* @param offset the file offset the block was read from * @param onDiskDataSizeWithHeader see {@link #onDiskDataSizeWithHeader} * @param fileContext HFile meta data @@ -222,8 +269,9 @@ public class HFileBlock implements Cacheable { this.offset = offset; this.onDiskDataSizeWithHeader = onDiskDataSizeWithHeader; this.fileContext = fileContext; - if (fillHeader) + if (fillHeader) { overwriteHeader(); + } this.buf.rewind(); } @@ -265,8 +313,8 @@ public class HFileBlock implements Cacheable { } else { contextBuilder.withChecksumType(ChecksumType.NULL); contextBuilder.withBytesPerCheckSum(0); - this.onDiskDataSizeWithHeader = onDiskSizeWithoutHeader + - HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; + this.onDiskDataSizeWithHeader = + onDiskSizeWithoutHeader + HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; } this.fileContext = contextBuilder.build(); buf = b; @@ -296,14 +344,14 @@ public class HFileBlock implements Cacheable { /** * @return the on-disk size of the data part + checksum (header excluded). */ - public int getOnDiskSizeWithoutHeader() { + int getOnDiskSizeWithoutHeader() { return onDiskSizeWithoutHeader; } /** * @return the uncompressed size of data part (header and checksum excluded). */ - public int getUncompressedSizeWithoutHeader() { + public int getUncompressedSizeWithoutHeader() { return uncompressedSizeWithoutHeader; } @@ -311,7 +359,7 @@ public class HFileBlock implements Cacheable { * @return the offset of the previous block of the same type in the file, or * -1 if unknown */ - public long getPrevBlockOffset() { + long getPrevBlockOffset() { return prevBlockOffset; } @@ -347,9 +395,9 @@ public class HFileBlock implements Cacheable { /** * Returns the buffer this block stores internally. The clients must not * modify the buffer object. This method has to be public because it is - * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter} - * to avoid object creation on every Bloom filter lookup, but has to - * be used with caution. Checksum data is not included in the returned + * used in {@link org.apache.hadoop.hbase.util.CompoundBloomFilter} + * to avoid object creation on every Bloom filter lookup, but has to + * be used with caution. Checksum data is not included in the returned * buffer but header data is. 
* * @return the buffer of this block for read-only operations @@ -445,20 +493,20 @@ public class HFileBlock implements Cacheable { @Override public String toString() { StringBuilder sb = new StringBuilder() - .append("HFileBlock [") - .append(" fileOffset=").append(offset) - .append(" headerSize()=").append(headerSize()) - .append(" blockType=").append(blockType) - .append(" onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader) - .append(" uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader) - .append(" prevBlockOffset=").append(prevBlockOffset) - .append(" isUseHBaseChecksum()=").append(fileContext.isUseHBaseChecksum()); + .append("[") + .append("blockType=").append(blockType) + .append(", fileOffset=").append(offset) + .append(", headerSize=").append(headerSize()) + .append(", onDiskSizeWithoutHeader=").append(onDiskSizeWithoutHeader) + .append(", uncompressedSizeWithoutHeader=").append(uncompressedSizeWithoutHeader) + .append(", prevBlockOffset=").append(prevBlockOffset) + .append(", isUseHBaseChecksum=").append(fileContext.isUseHBaseChecksum()); if (fileContext.isUseHBaseChecksum()) { - sb.append(" checksumType=").append(ChecksumType.codeToType(this.buf.get(24))) - .append(" bytesPerChecksum=").append(this.buf.getInt(24 + 1)) - .append(" onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader); + sb.append(", checksumType=").append(ChecksumType.codeToType(this.buf.get(24))) + .append(", bytesPerChecksum=").append(this.buf.getInt(24 + 1)) + .append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader); } else { - sb.append(" onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader) + sb.append(", onDiskDataSizeWithHeader=").append(onDiskDataSizeWithHeader) .append("(").append(onDiskSizeWithoutHeader) .append("+").append(HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM).append(")"); } @@ -473,13 +521,13 @@ public class HFileBlock implements Cacheable { bufWithoutHeader.get(dataBeginBytes); dataBegin = Bytes.toStringBinary(dataBeginBytes); } - sb.append(" getOnDiskSizeWithHeader()=").append(getOnDiskSizeWithHeader()) - .append(" totalChecksumBytes()=").append(totalChecksumBytes()) - .append(" isUnpacked()=").append(isUnpacked()) - .append(" buf=[ ").append(buf).append(" ]") - .append(" dataBeginsWith=").append(dataBegin) - .append(" fileContext=").append(fileContext) - .append(" ]"); + sb.append(", getOnDiskSizeWithHeader=").append(getOnDiskSizeWithHeader()) + .append(", totalChecksumBytes=").append(totalChecksumBytes()) + .append(", isUnpacked=").append(isUnpacked()) + .append(", buf=[").append(buf).append("]") + .append(", dataBeginsWith=").append(dataBegin) + .append(", fileContext=").append(fileContext) + .append("]"); return sb.toString(); } @@ -607,19 +655,8 @@ public class HFileBlock implements Cacheable { } } - /** - * @param expectedType the expected type of this block - * @throws IOException if this block's type is different than expected - */ - public void expectType(BlockType expectedType) throws IOException { - if (blockType != expectedType) { - throw new IOException("Invalid block type: expected=" + expectedType - + ", actual=" + blockType); - } - } - /** @return the offset of this block in the file it was read from */ - public long getOffset() { + long getOffset() { if (offset < 0) { throw new IllegalStateException( "HFile block offset not initialized properly"); @@ -660,21 +697,20 @@ public class HFileBlock implements Cacheable { } /** - * Read from an input stream. 
Analogous to + * Read from an input stream at least necessaryLen and if possible, + * extraLen also if available. Analogous to * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but specifies a - * number of "extra" bytes that would be desirable but not absolutely - * necessary to read. + * number of "extra" bytes to also optionally read. * * @param in the input stream to read from * @param buf the buffer to read into * @param bufOffset the destination offset in the buffer - * @param necessaryLen the number of bytes that are absolutely necessary to - * read + * @param necessaryLen the number of bytes that are absolutely necessary to read * @param extraLen the number of extra bytes that would be nice to read * @return true if succeeded reading the extra bytes * @throws IOException if failed to read the necessary bytes */ - public static boolean readWithExtra(InputStream in, byte[] buf, + static boolean readWithExtra(InputStream in, byte[] buf, int bufOffset, int necessaryLen, int extraLen) throws IOException { int bytesRemaining = necessaryLen + extraLen; while (bytesRemaining > 0) { @@ -698,7 +734,8 @@ public class HFileBlock implements Cacheable { } /** - * Read from an input stream. Analogous to + * Read from an input stream at least necessaryLen and if possible, + * extraLen also if available. Analogous to * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses * positional read and specifies a number of "extra" bytes that would be * desirable but not absolutely necessary to read. @@ -751,14 +788,13 @@ public class HFileBlock implements Cacheable { *

  • Construct an {@link HFileBlock.Writer}, providing a compression algorithm. *
  • Call {@link Writer#startWriting} and get a data stream to write to. *
  • Write your data into the stream. - *
  • Call {@link Writer#writeHeaderAndData(FSDataOutputStream)} as many times as you need to. + *
• Call Writer#writeHeaderAndData(FSDataOutputStream) as many times as you need to * store the serialized block into an external stream. *
  • Repeat to write more blocks. * *
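+ *
+ * A minimal sketch of this cycle (assuming an already-open FSDataOutputStream out and an
+ * HFileContext fileContext; the names here are illustrative):
+ *
+ *   HFileBlock.Writer writer = new HFileBlock.Writer(NoOpDataBlockEncoder.INSTANCE, fileContext);
+ *   DataOutputStream dos = writer.startWriting(BlockType.META);
+ *   dos.write(payloadBytes); // your serialized block payload
+ *   writer.writeHeaderAndData(out); // fills in header and checksums, then writes the block
+ *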

    */ public static class Writer { - private enum State { INIT, WRITING, @@ -773,7 +809,7 @@ public class HFileBlock implements Cacheable { private HFileBlockEncodingContext dataBlockEncodingCtx; - /** block encoding context for non-data blocks */ + /** block encoding context for non-data blocks*/ private HFileBlockDefaultEncodingContext defaultBlockEncodingCtx; /** @@ -846,25 +882,26 @@ public class HFileBlock implements Cacheable { * @param dataBlockEncoder data block encoding algorithm to use */ public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) { - this.dataBlockEncoder = dataBlockEncoder != null - ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE; - defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, - HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); - dataBlockEncodingCtx = this.dataBlockEncoder - .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); - if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) { throw new RuntimeException("Unsupported value of bytesPerChecksum. " + " Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " + fileContext.getBytesPerChecksum()); } - + this.dataBlockEncoder = dataBlockEncoder != null? + dataBlockEncoder: NoOpDataBlockEncoder.INSTANCE; + this.dataBlockEncodingCtx = this.dataBlockEncoder. + newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); + // TODO: This should be lazily instantiated since we usually do NOT need this default encoder + this.defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null, + HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); + // TODO: Set BAOS initial size. Use fileContext.getBlocksize() and add for header/checksum baosInMemory = new ByteArrayOutputStream(); - prevOffsetByType = new long[BlockType.values().length]; - for (int i = 0; i < prevOffsetByType.length; ++i) - prevOffsetByType[i] = -1; - + for (int i = 0; i < prevOffsetByType.length; ++i) { + prevOffsetByType[i] = UNSET; + } + // TODO: Why fileContext saved away when we have dataBlockEncoder and/or + // defaultDataBlockEncoder? this.fileContext = fileContext; } @@ -874,7 +911,7 @@ public class HFileBlock implements Cacheable { * @return the stream the user can write their data into * @throws IOException */ - public DataOutputStream startWriting(BlockType newBlockType) + DataOutputStream startWriting(BlockType newBlockType) throws IOException { if (state == State.BLOCK_READY && startOffset != -1) { // We had a previous block that was written to a stream at a specific @@ -904,10 +941,10 @@ public class HFileBlock implements Cacheable { * @param cell * @throws IOException */ - public void write(Cell cell) throws IOException{ + void write(Cell cell) throws IOException{ expectState(State.WRITING); - this.unencodedDataSizeWritten += this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, - this.userDataStream); + this.unencodedDataSizeWritten += + this.dataBlockEncoder.encode(cell, dataBlockEncodingCtx, this.userDataStream); } /** @@ -954,6 +991,7 @@ public class HFileBlock implements Cacheable { } userDataStream.flush(); // This does an array copy, so it is safe to cache this byte array. + // Header is still the empty, 'dummy' header that is yet to be filled out. 
uncompressedBytesWithHeader = baosInMemory.toByteArray(); prevOffset = prevOffsetByType[blockType.getId()]; @@ -965,22 +1003,25 @@ public class HFileBlock implements Cacheable { onDiskBytesWithHeader = dataBlockEncodingCtx .compressAndEncrypt(uncompressedBytesWithHeader); } else { - onDiskBytesWithHeader = defaultBlockEncodingCtx - .compressAndEncrypt(uncompressedBytesWithHeader); + onDiskBytesWithHeader = this.defaultBlockEncodingCtx. + compressAndEncrypt(uncompressedBytesWithHeader); } + // Calculate how many bytes we need for checksum on the tail of the block. int numBytes = (int) ChecksumUtil.numBytes( onDiskBytesWithHeader.length, fileContext.getBytesPerChecksum()); - // put the header for on disk bytes + // Put the header for the on disk bytes; header currently is unfilled-out putHeader(onDiskBytesWithHeader, 0, onDiskBytesWithHeader.length + numBytes, uncompressedBytesWithHeader.length, onDiskBytesWithHeader.length); - // set the header for the uncompressed bytes (for cache-on-write) - putHeader(uncompressedBytesWithHeader, 0, + // Set the header for the uncompressed bytes (for cache-on-write) -- IFF different from + // onDiskBytesWithHeader array. + if (onDiskBytesWithHeader != uncompressedBytesWithHeader) { + putHeader(uncompressedBytesWithHeader, 0, onDiskBytesWithHeader.length + numBytes, uncompressedBytesWithHeader.length, onDiskBytesWithHeader.length); - + } onDiskChecksum = new byte[numBytes]; ChecksumUtil.generateChecksums( onDiskBytesWithHeader, 0, onDiskBytesWithHeader.length, @@ -1027,9 +1068,9 @@ public class HFileBlock implements Cacheable { * @param out * @throws IOException */ - public void writeHeaderAndData(FSDataOutputStream out) throws IOException { + void writeHeaderAndData(FSDataOutputStream out) throws IOException { long offset = out.getPos(); - if (startOffset != -1 && offset != startOffset) { + if (startOffset != UNSET && offset != startOffset) { throw new IOException("A " + blockType + " block written to a " + "stream twice, first at offset " + startOffset + ", then at " + offset); @@ -1082,7 +1123,7 @@ public class HFileBlock implements Cacheable { /** * Releases resources used by this writer. 
*/ - public void release() { + void release() { if (dataBlockEncodingCtx != null) { dataBlockEncodingCtx.close(); dataBlockEncodingCtx = null; @@ -1103,9 +1144,8 @@ public class HFileBlock implements Cacheable { */ int getOnDiskSizeWithoutHeader() { expectState(State.BLOCK_READY); - return onDiskBytesWithHeader.length - + onDiskChecksum.length - - HConstants.HFILEBLOCK_HEADER_SIZE; + return onDiskBytesWithHeader.length + + onDiskChecksum.length - HConstants.HFILEBLOCK_HEADER_SIZE; } /** @@ -1137,7 +1177,7 @@ public class HFileBlock implements Cacheable { } /** @return true if a block is being written */ - public boolean isWriting() { + boolean isWriting() { return state == State.WRITING; } @@ -1148,7 +1188,7 @@ public class HFileBlock implements Cacheable { * * @return the number of bytes written */ - public int blockSizeWritten() { + int blockSizeWritten() { if (state != State.WRITING) return 0; return this.unencodedDataSizeWritten; } @@ -1196,7 +1236,7 @@ public class HFileBlock implements Cacheable { * @param out the file system output stream * @throws IOException */ - public void writeBlock(BlockWritable bw, FSDataOutputStream out) + void writeBlock(BlockWritable bw, FSDataOutputStream out) throws IOException { bw.writeToBlock(startWriting(bw.getBlockType())); writeHeaderAndData(out); @@ -1209,7 +1249,7 @@ public class HFileBlock implements Cacheable { * version is MINOR_VERSION_WITH_CHECKSUM. This is indicated by setting a * 0 value in bytesPerChecksum. */ - public HFileBlock getBlockForCaching(CacheConfig cacheConf) { + HFileBlock getBlockForCaching(CacheConfig cacheConf) { HFileContext newContext = new HFileContextBuilder() .withBlockSize(fileContext.getBlocksize()) .withBytesPerCheckSum(0) @@ -1232,7 +1272,7 @@ public class HFileBlock implements Cacheable { } /** Something that can be written into a block. */ - public interface BlockWritable { + interface BlockWritable { /** The type of block this data should use. */ BlockType getBlockType(); @@ -1249,7 +1289,7 @@ public class HFileBlock implements Cacheable { // Block readers and writers /** An interface allowing to iterate {@link HFileBlock}s. */ - public interface BlockIterator { + interface BlockIterator { /** * Get the next block, or null if there are no more blocks to iterate. @@ -1264,7 +1304,7 @@ public class HFileBlock implements Cacheable { } /** A full-fledged reader with iteration ability. */ - public interface FSReader { + interface FSReader { /** * Reads the block at the given offset in the file with the given on-disk @@ -1371,8 +1411,8 @@ public class HFileBlock implements Cacheable { * the on-disk size of the next block, or -1 if it could not be determined. 
* * @param dest destination buffer - * @param destOffset offset in the destination buffer - * @param size size of the block to be read + * @param destOffset offset into the destination buffer at where to put the bytes we read + * @param size size of read * @param peekIntoNextBlock whether to read the next block's on-disk size * @param fileOffset position in the stream to read at * @param pread whether we should do a positional read @@ -1381,12 +1421,10 @@ public class HFileBlock implements Cacheable { * -1 if it could not be determined * @throws IOException */ - protected int readAtOffset(FSDataInputStream istream, - byte[] dest, int destOffset, int size, + protected int readAtOffset(FSDataInputStream istream, byte [] dest, int destOffset, int size, boolean peekIntoNextBlock, long fileOffset, boolean pread) - throws IOException { - if (peekIntoNextBlock && - destOffset + size + hdrSize > dest.length) { + throws IOException { + if (peekIntoNextBlock && destOffset + size + hdrSize > dest.length) { // We are asked to read the next block's header as well, but there is // not enough room in the array. throw new IOException("Attempted to read " + size + " bytes and " + @@ -1440,9 +1478,15 @@ public class HFileBlock implements Cacheable { long offset = -1; byte[] header = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; final ByteBuffer buf = ByteBuffer.wrap(header, 0, HConstants.HFILEBLOCK_HEADER_SIZE); + @Override + public String toString() { + return "offset=" + this.offset + ", header=" + Bytes.toStringBinary(header); + } } - /** Reads version 2 blocks from the filesystem. */ + /** + * Reads version 2 blocks from the filesystem. + */ static class FSReaderImpl extends AbstractFSReader { /** The file system stream of the underlying {@link HFile} that * does or doesn't do checksum validations in the filesystem */ @@ -1523,7 +1567,7 @@ public class HFileBlock implements Cacheable { HFile.LOG.warn(msg); throw new IOException(msg); // cannot happen case here } - HFile.checksumFailures.increment(); // update metrics + HFile.CHECKSUM_FAILURES.increment(); // update metrics // If we have a checksum failure, we fall back into a mode where // the next few reads use HDFS level checksums. We aim to make the @@ -1598,6 +1642,7 @@ public class HFileBlock implements Cacheable { } int onDiskSizeWithHeader = (int) onDiskSizeWithHeaderL; + // See if we can avoid reading the header. This is desirable, because // we will not incur a backward seek operation if we have already // read this block's header as part of the previous read's look-ahead. @@ -1605,15 +1650,22 @@ public class HFileBlock implements Cacheable { // been read. // TODO: How often does this optimization fire? Has to be same thread so the thread local // is pertinent and we have to be reading next block as in a big scan. + ByteBuffer headerBuf = null; PrefetchedHeader prefetchedHeader = prefetchedHeaderForThread.get(); - ByteBuffer headerBuf = prefetchedHeader.offset == offset? prefetchedHeader.buf: null; - + boolean preReadHeader = false; + if (prefetchedHeader != null && prefetchedHeader.offset == offset) { + headerBuf = prefetchedHeader.buf; + preReadHeader = true; + } // Allocate enough space to fit the next block's header too. int nextBlockOnDiskSize = 0; byte[] onDiskBlock = null; HFileBlock b = null; + boolean fastPath = false; + boolean readHdrOnly = false; if (onDiskSizeWithHeader > 0) { + fastPath = true; // We know the total on-disk size. Read the entire block into memory, // then parse the header. 
This code path is used when // doing a random read operation relying on the block index, as well as @@ -1670,6 +1722,7 @@ public class HFileBlock implements Cacheable { // Unfortunately, we still have to do a separate read operation to // read the header. if (headerBuf == null) { + readHdrOnly = true; // From the header, determine the on-disk size of the given hfile // block, and read the remaining data, thereby incurring two read // operations. This might happen when we are doing the first read @@ -1682,12 +1735,12 @@ public class HFileBlock implements Cacheable { } // TODO: FIX!!! Expensive parse just to get a length b = new HFileBlock(headerBuf, fileContext.isUseHBaseChecksum()); + // onDiskBlock is whole block + header + checksums then extra hdrSize to read next header onDiskBlock = new byte[b.getOnDiskSizeWithHeader() + hdrSize]; - // headerBuf is HBB + // headerBuf is HBB. Copy hdr into onDiskBlock System.arraycopy(headerBuf.array(), headerBuf.arrayOffset(), onDiskBlock, 0, hdrSize); - nextBlockOnDiskSize = - readAtOffset(is, onDiskBlock, hdrSize, b.getOnDiskSizeWithHeader() - - hdrSize, true, offset + hdrSize, pread); + nextBlockOnDiskSize = readAtOffset(is, onDiskBlock, hdrSize, + b.getOnDiskSizeWithHeader() - hdrSize, true, offset + hdrSize, pread); onDiskSizeWithHeader = b.onDiskSizeWithoutHeader + hdrSize; } @@ -1717,6 +1770,10 @@ public class HFileBlock implements Cacheable { b.offset = offset; b.fileContext.setIncludesTags(this.fileContext.isIncludesTags()); b.fileContext.setIncludesMvcc(this.fileContext.isIncludesMvcc()); + if (LOG.isTraceEnabled()) { + LOG.trace("Read preReadHeader=" + preReadHeader + ", fastPath=" + fastPath + + ", readHdrOnly=" + readHdrOnly + ", " + b); + } return b; } @@ -1778,6 +1835,9 @@ public class HFileBlock implements Cacheable { serializeExtraInfo(destination); } + /** + * Write out the content of EXTRA_SERIALIZATION_SPACE. Public so can be accessed by BucketCache. + */ public void serializeExtraInfo(ByteBuffer destination) { destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0); destination.putLong(this.offset); @@ -1850,7 +1910,7 @@ public class HFileBlock implements Cacheable { } /** - * Calcuate the number of bytes required to store all the checksums + * Calculate the number of bytes required to store all the checksums * for this block. Each checksum value is a 4 byte integer. */ int totalChecksumBytes() { @@ -1876,16 +1936,14 @@ public class HFileBlock implements Cacheable { * Maps a minor version to the size of the header. */ public static int headerSize(boolean usesHBaseChecksum) { - if (usesHBaseChecksum) { - return HConstants.HFILEBLOCK_HEADER_SIZE; - } - return HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; + return usesHBaseChecksum? + HConstants.HFILEBLOCK_HEADER_SIZE: HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; } /** * Return the appropriate DUMMY_HEADER for the minor version */ - public byte[] getDummyHeaderForVersion() { + byte[] getDummyHeaderForVersion() { return getDummyHeaderForVersion(this.fileContext.isUseHBaseChecksum()); } @@ -1893,17 +1951,14 @@ public class HFileBlock implements Cacheable { * Return the appropriate DUMMY_HEADER for the minor version */ static private byte[] getDummyHeaderForVersion(boolean usesHBaseChecksum) { - if (usesHBaseChecksum) { - return HConstants.HFILEBLOCK_DUMMY_HEADER; - } - return DUMMY_HEADER_NO_CHECKSUM; + return usesHBaseChecksum? HConstants.HFILEBLOCK_DUMMY_HEADER: DUMMY_HEADER_NO_CHECKSUM; } /** * @return the HFileContext used to create this HFileBlock. 
Not necessary the * fileContext for the file from which this block's data was originally read. */ - public HFileContext getHFileContext() { + HFileContext getHFileContext() { return this.fileContext; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java index ba09bc1..1e1a4fa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java @@ -418,7 +418,7 @@ public class HFileReaderV2 extends AbstractHFileReader { assert cachedBlock.isUnpacked() : "Packed block leak."; if (cachedBlock.getBlockType().isData()) { if (updateCacheMetrics) { - HFile.dataBlockReadCnt.increment(); + HFile.DATABLOCK_READ_COUNT.increment(); } // Validate encoding type for data blocks. We include encoding // type in the cache key, and we expect it to match on a cache hit. @@ -457,7 +457,7 @@ public class HFileReaderV2 extends AbstractHFileReader { } if (updateCacheMetrics && hfileBlock.getBlockType().isData()) { - HFile.dataBlockReadCnt.increment(); + HFile.DATABLOCK_READ_COUNT.increment(); } return unpacked; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java index 8c0d0b5..fedfd20 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java @@ -270,9 +270,10 @@ public final class BucketAllocator { } } - // Default block size is 64K, so we choose more sizes near 64K, you'd better + // Default block size in hbase is 64K, so we choose more sizes near 64K, you'd better // reset it according to your cluster's block size distribution // TODO Support the view of block size distribution statistics + // TODO: Why we add the extra 1024 bytes? Slop? private static final int DEFAULT_BUCKET_SIZES[] = { 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024, 32 * 1024 + 1024, 40 * 1024 + 1024, 48 * 1024 + 1024, 56 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024, 128 * 1024 + 1024, @@ -290,6 +291,9 @@ public final class BucketAllocator { return null; } + /** + * So, what is the minimum amount of items we'll tolerate in a single bucket? 
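+   * (Presumably four, per the value below: the allocator sizes every bucket
+   * to hold at least this many of the largest configured item. A worked
+   * example, assuming 512 * 1024 + 1024 is the largest DEFAULT_BUCKET_SIZES
+   * entry: bucketCapacity = 4 * 525312 = 2101248 bytes, so a 1G bucket
+   * cache yields 1073741824 / 2101248 = 511 buckets.)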
+ */ static public final int FEWEST_ITEMS_IN_BUCKET = 4; private final int[] bucketSizes; @@ -309,9 +313,8 @@ public final class BucketAllocator { this.bucketCapacity = FEWEST_ITEMS_IN_BUCKET * bigItemSize; buckets = new Bucket[(int) (availableSpace / bucketCapacity)]; if (buckets.length < this.bucketSizes.length) - throw new BucketAllocatorException( - "Bucket allocator size too small - must have room for at least " - + this.bucketSizes.length + " buckets"); + throw new BucketAllocatorException("Bucket allocator size too small (" + buckets.length + + "); must have room for at least " + this.bucketSizes.length + " buckets"); bucketSizeInfos = new BucketSizeInfo[this.bucketSizes.length]; for (int i = 0; i < this.bucketSizes.length; ++i) { bucketSizeInfos[i] = new BucketSizeInfo(i); @@ -322,6 +325,12 @@ public final class BucketAllocator { .instantiateBucket(buckets[i]); } this.totalSize = ((long) buckets.length) * bucketCapacity; + if (LOG.isInfoEnabled()) { + LOG.info("Cache totalSize=" + this.totalSize + ", buckets=" + this.buckets.length + + ", bucket capacity=" + this.bucketCapacity + + "=(" + FEWEST_ITEMS_IN_BUCKET + "*" + this.bigItemSize + ")=" + + "(FEWEST_ITEMS_IN_BUCKET*(largest configured bucketcache size))"); + } } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index 4699262..3f4640d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -75,7 +75,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; /** * BucketCache uses {@link BucketAllocator} to allocate/free blocks, and uses - * {@link BucketCache#ramCache} and {@link BucketCache#backingMap} in order to + * BucketCache#ramCache and BucketCache#backingMap in order to * determine if a given element is in the cache. The bucket cache can use on-heap or * off-heap memory {@link ByteBufferIOEngine} or in a file {@link FileIOEngine} to * store/read the block data. @@ -84,7 +84,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache} * *
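 * <p>(Write flow, in brief, as the ramCache/backingMap fields above suggest:
 * a newly cached block lands first in ramCache, writer threads drain it to
 * the IOEngine, and only then is the entry published to backingMap -- which
 * is why the read path consults both maps.)
 *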
 * <p>BucketCache can be used as mainly a block cache (see
- * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}),
- * combined with LruBlockCache to decrease CMS GC and heap fragmentation.
+ * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
+ * LruBlockCache to decrease CMS GC and heap fragmentation.
 *
 * <p>
    It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store @@ -343,6 +343,7 @@ public class BucketCache implements BlockCache, HeapSize { */ public void cacheBlockWithWait(BlockCacheKey cacheKey, Cacheable cachedItem, boolean inMemory, boolean wait) { + if (LOG.isTraceEnabled()) LOG.trace("Caching key=" + cacheKey + ", item=" + cachedItem); if (!cacheEnabled) { return; } @@ -414,6 +415,9 @@ public class BucketCache implements BlockCache, HeapSize { // existence here. if (bucketEntry.equals(backingMap.get(key))) { int len = bucketEntry.getLength(); + if (LOG.isTraceEnabled()) { + LOG.trace("Read offset=" + bucketEntry.offset() + ", len=" + len); + } ByteBuffer bb = ByteBuffer.allocate(len); int lenRead = ioEngine.read(bb, bucketEntry.offset()); if (lenRead != len) { @@ -565,7 +569,9 @@ public class BucketCache implements BlockCache, HeapSize { */ private void freeSpace(final String why) { // Ensure only one freeSpace progress at a time - if (!freeSpaceLock.tryLock()) return; + if (!freeSpaceLock.tryLock()) { + return; + } try { freeInProgress = true; long bytesToFreeWithoutExtra = 0; @@ -594,7 +600,7 @@ public class BucketCache implements BlockCache, HeapSize { return; } long currentSize = bucketAllocator.getUsedSize(); - long totalSize=bucketAllocator.getTotalSize(); + long totalSize = bucketAllocator.getTotalSize(); if (LOG.isDebugEnabled() && msgBuffer != null) { LOG.debug("Free started because \"" + why + "\"; " + msgBuffer.toString() + " of current used=" + StringUtils.byteDesc(currentSize) + ", actual cacheSize=" + @@ -799,7 +805,7 @@ public class BucketCache implements BlockCache, HeapSize { } } - // Make sure data pages are written are on media before we update maps. + // Make sure data pages are written on media before we update maps. try { ioEngine.sync(); } catch (IOException ioex) { @@ -873,9 +879,9 @@ public class BucketCache implements BlockCache, HeapSize { FileOutputStream fos = null; ObjectOutputStream oos = null; try { - if (!ioEngine.isPersistent()) - throw new IOException( - "Attempt to persist non-persistent cache mappings!"); + if (!ioEngine.isPersistent()) { + throw new IOException("Attempt to persist non-persistent cache mappings!"); + } fos = new FileOutputStream(persistencePath, false); oos = new ObjectOutputStream(fos); oos.writeLong(cacheCapacity); @@ -955,19 +961,17 @@ public class BucketCache implements BlockCache, HeapSize { } /** - * Used to shut down the cache -or- turn it off in the case of something - * broken. + * Used to shut down the cache -or- turn it off in the case of something broken. */ private void disableCache() { - if (!cacheEnabled) - return; + if (!cacheEnabled) return; cacheEnabled = false; ioEngine.shutdown(); this.scheduleThreadPool.shutdown(); - for (int i = 0; i < writerThreads.length; ++i) - writerThreads[i].interrupt(); + for (int i = 0; i < writerThreads.length; ++i) writerThreads[i].interrupt(); this.ramCache.clear(); if (!ioEngine.isPersistent() || persistencePath == null) { + // If persistent ioengine and a path, we will serialize out the backingMap. 
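+      // Here there is neither a persistent ioengine nor a path, so nothing
+      // could be reloaded on a restart; just let the map contents go.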
this.backingMap.clear(); } } @@ -1255,6 +1259,9 @@ public class BucketCache implements BlockCache, HeapSize { len == sliceBuf.limit() + block.headerSize() + HFileBlock.EXTRA_SERIALIZATION_SPACE; ByteBuffer extraInfoBuffer = ByteBuffer.allocate(HFileBlock.EXTRA_SERIALIZATION_SPACE); block.serializeExtraInfo(extraInfoBuffer); + if (LOG.isTraceEnabled()) { + LOG.trace("Write offset=" + offset + ", len=" + len); + } ioEngine.write(sliceBuf, offset); ioEngine.write(extraInfoBuffer, offset + len - HFileBlock.EXTRA_SERIALIZATION_SPACE); } else { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java index 058280d..d359f3b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterListOrOperatorWithBlkCnt.java @@ -83,7 +83,7 @@ public class TestFilterListOrOperatorWithBlkCnt { } private static long getBlkAccessCount() { - return HFile.dataBlockReadCnt.get(); + return HFile.DATABLOCK_READ_COUNT.get(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java index 1995aa4..5c11818 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestDataBlockEncoders.java @@ -72,6 +72,7 @@ public class TestDataBlockEncoders { private static int ENCODED_DATA_OFFSET = HConstants.HFILEBLOCK_HEADER_SIZE + DataBlockEncoding.ID_SIZE; + static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; private RedundantKVGenerator generator = new RedundantKVGenerator(); private Random randomizer = new Random(42l); @@ -87,7 +88,7 @@ public class TestDataBlockEncoders { this.includesMemstoreTS = includesMemstoreTS; this.includesTags = includesTag; } - + private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo, DataBlockEncoding encoding) { DataBlockEncoder encoder = encoding.getEncoder(); @@ -97,17 +98,15 @@ public class TestDataBlockEncoders { .withIncludesTags(includesTags) .withCompression(algo).build(); if (encoder != null) { - return encoder.newDataBlockEncodingContext(encoding, - HConstants.HFILEBLOCK_DUMMY_HEADER, meta); + return encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta); } else { - return new HFileBlockDefaultEncodingContext(encoding, - HConstants.HFILEBLOCK_DUMMY_HEADER, meta); + return new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta); } } /** * Test data block encoding of empty KeyValue. - * + * * @throws IOException * On test failure. */ @@ -134,7 +133,7 @@ public class TestDataBlockEncoders { /** * Test KeyValues with negative timestamp. - * + * * @throws IOException * On test failure. 
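 * (Timestamps are raw longs in KeyValue, so negative values are legal on
 * disk; the encoders must round-trip the sign bit unchanged.)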
*/ @@ -179,7 +178,7 @@ public class TestDataBlockEncoders { List sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags); // create all seekers - List encodedSeekers = + List encodedSeekers = new ArrayList(); for (DataBlockEncoding encoding : DataBlockEncoding.values()) { LOG.info("Encoding: " + encoding); @@ -237,7 +236,7 @@ public class TestDataBlockEncoders { HFileBlockEncodingContext encodingContext) throws IOException { DataBlockEncoder encoder = encoding.getEncoder(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); - baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER); + baos.write(HFILEBLOCK_DUMMY_HEADER); DataOutputStream dos = new DataOutputStream(baos); encoder.startBlockEncoding(encodingContext, dos); for (KeyValue kv : kvs) { @@ -332,7 +331,7 @@ public class TestDataBlockEncoders { } } } - + private void checkSeekingConsistency(List encodedSeekers, boolean seekBefore, KeyValue keyValue) { ByteBuffer expectedKeyValue = null; @@ -378,10 +377,10 @@ public class TestDataBlockEncoders { continue; } HFileBlockEncodingContext encodingContext = new HFileBlockDefaultEncodingContext(encoding, - HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext); + HFILEBLOCK_DUMMY_HEADER, fileContext); ByteArrayOutputStream baos = new ByteArrayOutputStream(); - baos.write(HConstants.HFILEBLOCK_DUMMY_HEADER); + baos.write(HFILEBLOCK_DUMMY_HEADER); DataOutputStream dos = new DataOutputStream(baos); encoder.startBlockEncoding(encodingContext, dos); for (KeyValue kv : kvList) { @@ -395,7 +394,7 @@ public class TestDataBlockEncoders { testAlgorithm(encodedData, unencodedDataBuf, encoder); } } - + @Test public void testZeroByte() throws IOException { List kvList = new ArrayList(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java index ec2befe..914a37b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekToBlockWithEncoders.java @@ -37,6 +37,7 @@ import org.junit.experimental.categories.Category; @Category(SmallTests.class) public class TestSeekToBlockWithEncoders { + static final byte[] HFILEBLOCK_DUMMY_HEADER = new byte[HConstants.HFILEBLOCK_HEADER_SIZE]; /** * Test seeking while file is encoded. 
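
(Both encoder tests now declare their own zero-filled, header-sized dummy
rather than using HConstants.HFILEBLOCK_DUMMY_HEADER -- presumably so the
tests stop reaching into a constant this series is narrowing. Any
header-sized array works; the bytes are placeholder room that the real
header overwrites when the block is finished:

    static final byte[] HFILEBLOCK_DUMMY_HEADER =
        new byte[HConstants.HFILEBLOCK_HEADER_SIZE];
    baos.write(HFILEBLOCK_DUMMY_HEADER); // reserve room for the header
)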
@@ -261,8 +262,7 @@ public class TestSeekToBlockWithEncoders { HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false) .withIncludesMvcc(false).withIncludesTags(false) .withCompression(Compression.Algorithm.NONE).build(); - HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding, - HConstants.HFILEBLOCK_DUMMY_HEADER, meta); + HFileBlockEncodingContext encodingContext = encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta); ByteBuffer encodedBuffer = TestDataBlockEncoders.encodeKeyValues(encoding, kvs, encodingContext); DataBlockEncoder.EncodedSeeker seeker = encoder.createSeeker(KeyValue.COMPARATOR, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java index 64f46c8..16583d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java @@ -98,7 +98,7 @@ public class TestForceCacheImportantBlocks { public void setup() { // Make sure we make a new one each time. CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = null; - HFile.dataBlockReadCnt.set(0); + HFile.DATABLOCK_READ_COUNT.set(0); } @Test @@ -115,12 +115,12 @@ public class TestForceCacheImportantBlocks { CacheStats stats = cache.getStats(); writeTestData(region); assertEquals(0, stats.getHitCount()); - assertEquals(0, HFile.dataBlockReadCnt.get()); + assertEquals(0, HFile.DATABLOCK_READ_COUNT.get()); // Do a single get, take count of caches. If we are NOT caching DATA blocks, the miss // count should go up. Otherwise, all should be cached and the miss count should not rise. region.get(new Get(Bytes.toBytes("row" + 0))); assertTrue(stats.getHitCount() > 0); - assertTrue(HFile.dataBlockReadCnt.get() > 0); + assertTrue(HFile.DATABLOCK_READ_COUNT.get() > 0); long missCount = stats.getMissCount(); region.get(new Get(Bytes.toBytes("row" + 0))); if (this.cfCacheEnabled) assertEquals(missCount, stats.getMissCount()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java index 9810448..5e24ee2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlock.java @@ -545,7 +545,7 @@ public class TestHFileBlock { for (int i = 0; i < NUM_TEST_BLOCKS; ++i) { if (!pread) { assertEquals(is.getPos(), curOffset + (i == 0 ? 0 : - HConstants.HFILEBLOCK_HEADER_SIZE)); + HConstants.HFILEBLOCK_HEADER_SIZE)); } assertEquals(expectedOffsets.get(i).longValue(), curOffset); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java index 52596f4..ec11f18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockCompatibility.java @@ -344,8 +344,7 @@ public class TestHFileBlockCompatibility { // These constants are as they were in minorVersion 0. 
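  // (The no-checksum, minorVersion-0 header is 24 bytes: 8-byte block magic,
  // two 4-byte sizes, and an 8-byte previous-block offset. The checksummed
  // header adds 9 bytes -- checksum type, bytesPerChecksum, and
  // onDiskDataSizeWithHeader -- for 33 total.)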
private static final int HEADER_SIZE = HConstants.HFILEBLOCK_HEADER_SIZE_NO_CHECKSUM; private static final boolean DONT_FILL_HEADER = HFileBlock.DONT_FILL_HEADER; - private static final byte[] DUMMY_HEADER = - HFileBlock.DUMMY_HEADER_NO_CHECKSUM; + private static final byte[] DUMMY_HEADER = HFileBlock.DUMMY_HEADER_NO_CHECKSUM; private enum State { INIT, diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index a002f28..7bad8f4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -203,7 +203,7 @@ public class TestBlocksRead extends HBaseTestCase { } private static long getBlkAccessCount(byte[] cf) { - return HFile.dataBlockReadCnt.get(); + return HFile.DATABLOCK_READ_COUNT.get(); } private static long getBlkCount() { -- 2.6.1