From f7ae01906cda43e63843f25a44d6f7712ad08a94 Mon Sep 17 00:00:00 2001 From: eshcar Date: Thu, 16 Nov 2017 11:39:27 +0200 Subject: [PATCH] HBASE-18294 Reduce global heap pressure: flush based on heap occupancy --- .../hadoop/hbase/client/ConnectionUtils.java | 2 +- .../org/apache/hadoop/hbase/client/Mutation.java | 2 +- .../org/apache/hadoop/hbase/client/Result.java | 2 +- .../java/org/apache/hadoop/hbase/CellUtil.java | 2 +- .../org/apache/hadoop/hbase/PrivateCellUtil.java | 7 +- .../apache/hadoop/hbase/util/MapReduceCell.java | 2 +- .../hadoop/hbase/io/hfile/HFileBlockIndex.java | 2 +- .../hbase/regionserver/AbstractMemStore.java | 4 +- .../hbase/regionserver/CSLMImmutableSegment.java | 3 +- .../regionserver/CellArrayImmutableSegment.java | 9 +- .../regionserver/CellChunkImmutableSegment.java | 48 ++++++- .../hbase/regionserver/CompactingMemStore.java | 8 +- .../hbase/regionserver/CompactionPipeline.java | 37 ++++-- .../regionserver/CompositeImmutableSegment.java | 10 +- .../hadoop/hbase/regionserver/DefaultMemStore.java | 6 +- .../regionserver/FlushAllLargeStoresPolicy.java | 2 +- .../hbase/regionserver/FlushLargeStoresPolicy.java | 40 +++--- .../FlushNonSloppyStoresFirstPolicy.java | 2 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 143 ++++++++++++--------- .../hadoop/hbase/regionserver/HRegionServer.java | 4 +- .../apache/hadoop/hbase/regionserver/HStore.java | 2 +- .../hbase/regionserver/ImmutableMemStoreLAB.java | 12 ++ .../hadoop/hbase/regionserver/MemStoreFlusher.java | 16 +-- .../hadoop/hbase/regionserver/MemStoreLAB.java | 8 +- .../hadoop/hbase/regionserver/MemStoreLABImpl.java | 10 ++ .../hadoop/hbase/regionserver/MemStoreSize.java | 52 +++++++- .../hadoop/hbase/regionserver/MemStoreSizing.java | 58 +++------ .../MetricsTableWrapperAggregateImpl.java | 2 +- .../hadoop/hbase/regionserver/MutableSegment.java | 7 +- .../apache/hadoop/hbase/regionserver/Region.java | 2 +- .../regionserver/RegionServicesForStores.java | 6 +- .../apache/hadoop/hbase/regionserver/Segment.java | 105 ++++++++++----- .../hadoop/hbase/regionserver/StoreScanner.java | 2 +- .../java/org/apache/hadoop/hbase/wal/WALEdit.java | 2 +- .../hadoop/hbase/TestGlobalMemStoreSize.java | 4 +- .../hbase/TestPartialResultsFromClientSide.java | 2 +- .../TestServerSideScanMetricsFromClientSide.java | 3 +- .../hbase/client/TestAsyncRegionAdminApi.java | 12 +- .../hadoop/hbase/client/TestClientPushback.java | 2 +- ...estNegativeMemStoreSizeWithSlowCoprocessor.java | 2 +- .../hbase/regionserver/TestCompactingMemStore.java | 6 +- .../TestCompactingToCellFlatMapMemStore.java | 2 +- .../regionserver/TestEndToEndSplitTransaction.java | 2 +- .../hadoop/hbase/regionserver/TestHRegion.java | 30 ++--- .../regionserver/TestHRegionReplayEvents.java | 36 +++--- .../hadoop/hbase/regionserver/TestHStore.java | 6 +- .../regionserver/TestPerColumnFamilyFlush.java | 20 +-- .../regionserver/TestRegionServerAccounting.java | 20 +-- .../hadoop/hbase/regionserver/TestWALLockup.java | 4 +- .../TestWalAndCompactingMemStoreFlush.java | 18 +-- 50 files changed, 478 insertions(+), 310 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 780dcf9927..aa3e4bfa36 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -319,7 +319,7 @@ public final class ConnectionUtils { long 
estimatedHeapSizeOfResult = 0; // We don't make Iterator here for (Cell cell : rs.rawCells()) { - estimatedHeapSizeOfResult += PrivateCellUtil.estimatedHeapSizeOf(cell); + estimatedHeapSizeOfResult += PrivateCellUtil.estimatedSizeOfCell(cell); } return estimatedHeapSizeOfResult; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index dd2c2f032e..f98f22f0ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -438,7 +438,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C size * ClassSize.REFERENCE); for(Cell cell : entry.getValue()) { - heapsize += PrivateCellUtil.estimatedHeapSizeOf(cell); + heapsize += PrivateCellUtil.estimatedSizeOfCell(cell); } } heapsize += getAttributeSize(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java index d30c25f87b..832689e37f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java @@ -859,7 +859,7 @@ public class Result implements CellScannable, CellScanner { return size; } for (Cell c : result.rawCells()) { - size += PrivateCellUtil.estimatedHeapSizeOf(c); + size += PrivateCellUtil.estimatedSizeOfCell(c); } return size; } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java index 52eb8faceb..4602bff68a 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java @@ -1007,7 +1007,7 @@ public final class CellUtil { */ @Deprecated public static long estimatedHeapSizeOf(final Cell cell) { - return PrivateCellUtil.estimatedHeapSizeOf(cell); + return PrivateCellUtil.estimatedSizeOfCell(cell); } /********************* tags *************************************/ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java index d70d974717..8c92f73d33 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java @@ -310,7 +310,7 @@ public class PrivateCellUtil { @Override public long heapSize() { - long sum = HEAP_SIZE_OVERHEAD + estimatedHeapSizeOf(cell); + long sum = HEAP_SIZE_OVERHEAD + estimatedSizeOfCell(cell); if (this.tags != null) { sum += ClassSize.sizeOf(this.tags); } @@ -507,7 +507,7 @@ public class PrivateCellUtil { @Override public long heapSize() { - long sum = HEAP_SIZE_OVERHEAD + estimatedHeapSizeOf(cell); + long sum = HEAP_SIZE_OVERHEAD + estimatedSizeOfCell(cell); // this.tags is on heap byte[] if (this.tags != null) { sum += ClassSize.sizeOf(this.tags); @@ -2589,10 +2589,11 @@ public class PrivateCellUtil { * {@link HeapSize} we call {@link HeapSize#heapSize()} so cell can give a correct value. In other * cases we just consider the bytes occupied by the cell components ie. row, CF, qualifier, * timestamp, type, value and tags. 
+ * Note that this can be the JVM heap space (on-heap) or the OS heap (off-heap) * @param cell * @return estimate of the heap space */ - public static long estimatedHeapSizeOf(final Cell cell) { + public static long estimatedSizeOfCell(final Cell cell) { if (cell instanceof HeapSize) { return ((HeapSize) cell).heapSize(); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java index f3bac52bba..2a2dd6e604 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java @@ -242,7 +242,7 @@ public class MapReduceCell extends ByteBufferCell implements ExtendedCell { @Override public long heapSize() { - return PrivateCellUtil.estimatedHeapSizeOf(cell); + return PrivateCellUtil.estimatedSizeOfCell(cell); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java index 557a69c6b0..0c007adce9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java @@ -266,7 +266,7 @@ public class HFileBlockIndex { // Adding blockKeys for (Cell key : blockKeys) { - heapSize += ClassSize.align(PrivateCellUtil.estimatedHeapSizeOf(key)); + heapSize += ClassSize.align(PrivateCellUtil.estimatedSizeOfCell(key)); } } // Add comparator and the midkey atomicreference diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index ee480be1e2..3b55074627 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -171,7 +171,9 @@ public abstract class AbstractMemStore implements MemStore { } MemStoreSizing getSnapshotSizing() { - return new MemStoreSizing(this.snapshot.keySize(), this.snapshot.heapSize()); + return new MemStoreSizing(this.snapshot.keySize(), + this.snapshot.heapSize(), + this .snapshot.offHeapSize()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.java index b5fe03397a..6af84cb5ba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CSLMImmutableSegment.java @@ -39,7 +39,8 @@ public class CSLMImmutableSegment extends ImmutableSegment { protected CSLMImmutableSegment(Segment segment) { super(segment); // update the segment metadata heap size - incSize(0, -MutableSegment.DEEP_OVERHEAD + DEEP_OVERHEAD_CSLM); + long indexOverhead = -MutableSegment.DEEP_OVERHEAD + DEEP_OVERHEAD_CSLM; + incSize(0, indexOverhead, 0); // CSLM is always on-heap } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java index 0e80b1d0ed..f6d05a488c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayImmutableSegment.java @@ -45,7 +45,7 @@ public class CellArrayImmutableSegment extends ImmutableSegment { protected CellArrayImmutableSegment(CellComparator comparator, MemStoreSegmentsIterator iterator, MemStoreLAB memStoreLAB, int numOfCells, MemStoreCompactionStrategy.Action action) { super(null, comparator, memStoreLAB); // initiailize the CellSet with NULL - incSize(0, DEEP_OVERHEAD_CAM); + incSize(0, DEEP_OVERHEAD_CAM, 0); // CAM is always on-heap // build the new CellSet based on CellArrayMap and update the CellSet of the new Segment initializeCellSet(numOfCells, iterator, action); } @@ -58,7 +58,8 @@ public class CellArrayImmutableSegment extends ImmutableSegment { protected CellArrayImmutableSegment(CSLMImmutableSegment segment, MemStoreSizing memstoreSizing, MemStoreCompactionStrategy.Action action) { super(segment); // initiailize the upper class - incSize(0, DEEP_OVERHEAD_CAM - CSLMImmutableSegment.DEEP_OVERHEAD_CSLM); + long indexOverhead = DEEP_OVERHEAD_CAM - CSLMImmutableSegment.DEEP_OVERHEAD_CSLM; + incSize(0, indexOverhead, 0); // CAM is always on-heap int numOfCells = segment.getCellsCount(); // build the new CellSet based on CellChunkMap and update the CellSet of this Segment reinitializeCellSet(numOfCells, segment.getScanner(Long.MAX_VALUE), segment.getCellSet(), @@ -66,8 +67,8 @@ public class CellArrayImmutableSegment extends ImmutableSegment { // arrange the meta-data size, decrease all meta-data sizes related to SkipList; // add sizes of CellArrayMap entry (reinitializeCellSet doesn't take the care for the sizes) long newSegmentSizeDelta = numOfCells*(indexEntrySize()-ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY); - incSize(0, newSegmentSizeDelta); - memstoreSizing.incMemStoreSize(0, newSegmentSizeDelta); + incSize(0, newSegmentSizeDelta, 0); + memstoreSizing.incMemStoreSize(0, newSegmentSizeDelta, 0); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java index 7db00a0c48..ba59eeccf9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellChunkImmutableSegment.java @@ -51,7 +51,15 @@ public class CellChunkImmutableSegment extends ImmutableSegment { protected CellChunkImmutableSegment(CellComparator comparator, MemStoreSegmentsIterator iterator, MemStoreLAB memStoreLAB, int numOfCells, MemStoreCompactionStrategy.Action action) { super(null, comparator, memStoreLAB); // initialize the CellSet with NULL - incSize(0, DEEP_OVERHEAD_CCM); // initiate the heapSize with the size of the segment metadata + long indexOverhead = DEEP_OVERHEAD_CCM; + // memStoreLAB cannot be null in this class + boolean onHeap = getMemStoreLAB().isOnHeap(); + // initiate the heapSize with the size of the segment metadata + if(onHeap) { + incSize(0, indexOverhead, 0); + } else { + incSize(0, 0, indexOverhead); + } // build the new CellSet based on CellArrayMap and update the CellSet of the new Segment initializeCellSet(numOfCells, iterator, action); } @@ -64,7 +72,16 @@ public class CellChunkImmutableSegment extends ImmutableSegment { protected CellChunkImmutableSegment(CSLMImmutableSegment segment, MemStoreSizing memstoreSizing, MemStoreCompactionStrategy.Action action) { super(segment); // initiailize the upper class - 
incSize(0,-CSLMImmutableSegment.DEEP_OVERHEAD_CSLM + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM); + long indexOverhead = -CSLMImmutableSegment.DEEP_OVERHEAD_CSLM + + CellChunkImmutableSegment.DEEP_OVERHEAD_CCM; + // memStoreLAB cannot be null in this class + boolean onHeap = getMemStoreLAB().isOnHeap(); + // initiate the heapSize with the size of the segment metadata + if(onHeap) { + incSize(0, indexOverhead, 0); + } else { + incSize(0, 0, indexOverhead); + } int numOfCells = segment.getCellsCount(); // build the new CellSet based on CellChunkMap reinitializeCellSet(numOfCells, segment.getScanner(Long.MAX_VALUE), segment.getCellSet(), @@ -73,9 +90,32 @@ public class CellChunkImmutableSegment extends ImmutableSegment { // add sizes of CellChunkMap entry, decrease also Cell object sizes // (reinitializeCellSet doesn't take the care for the sizes) long newSegmentSizeDelta = numOfCells*(indexEntrySize()-ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY); + if(onHeap) { + incSize(0, newSegmentSizeDelta, 0); + memstoreSizing.incMemStoreSize(0, newSegmentSizeDelta, 0); + } else { + incSize(0, 0, newSegmentSizeDelta); + memstoreSizing.incMemStoreSize(0, 0, newSegmentSizeDelta); + + } + } - incSize(0, newSegmentSizeDelta); - memstoreSizing.incMemStoreSize(0, newSegmentSizeDelta); + @Override + protected long indexEntryOnHeapSize(boolean onHeap) { + if(onHeap) { + return indexEntrySize(); + } + // else the index is allocated off-heap + return 0; + } + + @Override + protected long indexEntryOffHeapSize(boolean offHeap) { + if(offHeap) { + return indexEntrySize(); + } + // else the index is allocated on-heap + return 0; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index d2502528f7..96eb6476a5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -149,9 +149,9 @@ public class CompactingMemStore extends AbstractMemStore { @Override public MemStoreSize size() { MemStoreSizing memstoreSizing = new MemStoreSizing(); - memstoreSizing.incMemStoreSize(this.active.keySize(), this.active.heapSize()); + memstoreSizing.incMemStoreSize(active.getMemStoreSizing()); for (Segment item : pipeline.getSegments()) { - memstoreSizing.incMemStoreSize(item.keySize(), item.heapSize()); + memstoreSizing.incMemStoreSize(item.getMemStoreSizing()); } return memstoreSizing; } @@ -223,13 +223,13 @@ public class CompactingMemStore extends AbstractMemStore { // if snapshot is empty the tail of the pipeline (or everything in the memstore) is flushed if (compositeSnapshot) { snapshotSizing = pipeline.getPipelineSizing(); - snapshotSizing.incMemStoreSize(this.active.keySize(), this.active.heapSize()); + snapshotSizing.incMemStoreSize(active.getMemStoreSizing()); } else { snapshotSizing = pipeline.getTailSizing(); } } return snapshotSizing.getDataSize() > 0 ? 
snapshotSizing - : new MemStoreSize(this.active.keySize(), this.active.heapSize()); + : new MemStoreSize(active.getMemStoreSizing()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java index 2f479e9016..3364185d37 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java @@ -142,14 +142,19 @@ public class CompactionPipeline { if(segment != null) newDataSize = segment.keySize(); long dataSizeDelta = suffixDataSize - newDataSize; long suffixHeapSize = getSegmentsHeapSize(suffix); + long suffixOffHeapSize = getSegmentsOffHeapSize(suffix); long newHeapSize = 0; - if(segment != null) newHeapSize = segment.heapSize(); + long newOffHeapSize = 0; + if(segment != null) { + newHeapSize = segment.heapSize(); + newOffHeapSize = segment.offHeapSize(); + } + long offHeapSizeDelta = suffixOffHeapSize - newOffHeapSize; long heapSizeDelta = suffixHeapSize - newHeapSize; - region.addMemStoreSize(new MemStoreSizing(-dataSizeDelta, -heapSizeDelta)); + region.addMemStoreSize(new MemStoreSize(-dataSizeDelta, -heapSizeDelta, -offHeapSizeDelta)); if (LOG.isDebugEnabled()) { - LOG.debug("Suffix data size: " + suffixDataSize + " new segment data size: " - + newDataSize + ". Suffix heap size: " + suffixHeapSize - + " new segment heap size: " + newHeapSize); + LOG.debug("Suffix data size: " + suffixDataSize + " new segment data size: " + newDataSize + + ". Suffix heap size: " + suffixHeapSize + " new segment heap size: " + newHeapSize); } } return true; @@ -163,6 +168,14 @@ public class CompactionPipeline { return res; } + private static long getSegmentsOffHeapSize(List list) { + long res = 0; + for (Segment segment : list) { + res += segment.offHeapSize(); + } + return res; + } + private static long getSegmentsKeySize(List list) { long res = 0; for (Segment segment : list) { @@ -204,7 +217,8 @@ public class CompactionPipeline { if(region != null) { // update the global memstore size counter // upon flattening there is no change in the data size - region.addMemStoreSize(new MemStoreSize(0, newMemstoreAccounting.getHeapSize())); + region.addMemStoreSize(new MemStoreSize(0, newMemstoreAccounting.getHeapSize(), + newMemstoreAccounting.getOffHeapSize())); } LOG.debug("Compaction pipeline segment " + s + " was flattened"); return true; @@ -242,19 +256,16 @@ public class CompactionPipeline { public MemStoreSizing getTailSizing() { LinkedList localCopy = readOnlyCopy; if (localCopy.isEmpty()) return new MemStoreSizing(); - return new MemStoreSizing(localCopy.peekLast().keySize(), localCopy.peekLast().heapSize()); + return new MemStoreSizing(localCopy.peekLast().getMemStoreSizing()); } public MemStoreSizing getPipelineSizing() { - long keySize = 0; - long heapSize = 0; + MemStoreSizing memStoreSizing = new MemStoreSizing(); LinkedList localCopy = readOnlyCopy; - if (localCopy.isEmpty()) return new MemStoreSizing(); for (Segment segment : localCopy) { - keySize += segment.keySize(); - heapSize += segment.heapSize(); + memStoreSizing.incMemStoreSize(segment.getMemStoreSizing()); } - return new MemStoreSizing(keySize, heapSize); + return memStoreSizing; } private void swapSuffix(List suffix, ImmutableSegment segment, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java index 93658193c1..3fc6038e2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java @@ -89,14 +89,6 @@ public class CompositeImmutableSegment extends ImmutableSegment { } /** - * @return the first cell in the segment that has equal or greater key than the given cell - */ - @Override - public Cell getFirstAfter(Cell cell) { - throw new IllegalStateException("Not supported by CompositeImmutableScanner"); - } - - /** * Closing a segment before it is being discarded */ @Override @@ -208,7 +200,7 @@ public class CompositeImmutableSegment extends ImmutableSegment { * Updates the heap size counter of the segment by the given delta */ @Override - protected void incSize(long delta, long heapOverhead) { + protected void incSize(long delta, long heapOverhead, long offHeapOverhead) { throw new IllegalStateException("Not supported by CompositeImmutableScanner"); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 0e0276a1f4..aee167c012 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -104,9 +104,9 @@ public class DefaultMemStore extends AbstractMemStore { */ @Override public MemStoreSize getFlushableSize() { - MemStoreSize snapshotSize = getSnapshotSize(); + MemStoreSize snapshotSize = getSnapshotSizing(); return snapshotSize.getDataSize() > 0 ? 
snapshotSize - : new MemStoreSize(keySize(), heapSize()); + : new MemStoreSize(active.getMemStoreSizing()); } @Override @@ -155,7 +155,7 @@ public class DefaultMemStore extends AbstractMemStore { @Override public MemStoreSize size() { - return new MemStoreSize(this.active.keySize(), this.active.heapSize()); + return new MemStoreSize(active.getMemStoreSizing()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java index e4476d040b..f34b262147 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java @@ -44,7 +44,7 @@ public class FlushAllLargeStoresPolicy extends FlushLargeStoresPolicy { // Family number might also be zero in some of our unit test case return; } - this.flushSizeLowerBound = getFlushSizeLowerBound(region); + setFlushSizeLowerBounds(region); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java index 1610fd882f..64a216bf29 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java @@ -40,18 +40,23 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy { public static final long DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN = 1024 * 1024 * 16L; - protected long flushSizeLowerBound = -1; + protected long flushHeapSizeLowerBound = -1; + protected long flushOffHeapSizeLowerBound = -1; - protected long getFlushSizeLowerBound(HRegion region) { + protected void setFlushSizeLowerBounds(HRegion region) { int familyNumber = region.getTableDescriptor().getColumnFamilyCount(); // For multiple families, lower bound is the "average flush size" by default // unless setting in configuration is larger. 
-    long flushSizeLowerBound = region.getMemStoreFlushSize() / familyNumber;
+    flushHeapSizeLowerBound = region.getMemStoreFlushHeapSize() / familyNumber;
+    flushOffHeapSizeLowerBound = region.getMemStoreFlushOffHeapSize() / familyNumber;
     long minimumLowerBound =
         getConf().getLong(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
           DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN);
-    if (minimumLowerBound > flushSizeLowerBound) {
-      flushSizeLowerBound = minimumLowerBound;
+    if (minimumLowerBound > flushHeapSizeLowerBound) {
+      flushHeapSizeLowerBound = minimumLowerBound;
+    }
+    if (minimumLowerBound > flushOffHeapSizeLowerBound) {
+      flushOffHeapSizeLowerBound = minimumLowerBound;
     }
     // use the setting in table description if any
     String flushedSizeLowerBoundString =
@@ -60,30 +65,35 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy {
       if (LOG.isDebugEnabled()) {
         LOG.debug("No " + HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND
             + " set in description of table " + region.getTableDescriptor().getTableName()
-            + ", use config (" + flushSizeLowerBound + ") instead");
+            + ", use config (flushHeapSizeLowerBound=" + flushHeapSizeLowerBound
+            + ", flushOffHeapSizeLowerBound=" + flushOffHeapSizeLowerBound+") instead");
       }
     } else {
       try {
-        flushSizeLowerBound = Long.parseLong(flushedSizeLowerBoundString);
+        flushHeapSizeLowerBound = Long.parseLong(flushedSizeLowerBoundString);
+        flushOffHeapSizeLowerBound = flushHeapSizeLowerBound;
       } catch (NumberFormatException nfe) {
         // fall back for fault setting
         LOG.warn("Number format exception when parsing "
             + HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND + " for table "
-            + region.getTableDescriptor().getTableName() + ":" + flushedSizeLowerBoundString + ". " + nfe
-            + ", use config (" + flushSizeLowerBound + ") instead");
+            + region.getTableDescriptor().getTableName() + ":" + flushedSizeLowerBoundString
+            + ". 
" + nfe + + ", use config (flushHeapSizeLowerBound=" + flushHeapSizeLowerBound + + ", flushOffHeapSizeLowerBound=" + flushOffHeapSizeLowerBound+") instead"); } } - return flushSizeLowerBound; } protected boolean shouldFlush(HStore store) { - if (store.getMemStoreSize().getDataSize() > this.flushSizeLowerBound) { + if (store.getMemStoreSize().getHeapSize() > this.flushHeapSizeLowerBound + || store.getMemStoreSize().getOffHeapSize() > this.flushOffHeapSizeLowerBound) { if (LOG.isDebugEnabled()) { - LOG.debug("Flush Column Family " + store.getColumnFamilyName() + " of " + - region.getRegionInfo().getEncodedName() + " because memstoreSize=" + - store.getMemStoreSize().getDataSize() + " > lower bound=" - + this.flushSizeLowerBound); + LOG.debug("Flush Column Family " + store.getColumnFamilyName() + " of " + + region.getRegionInfo().getEncodedName() + " because memstore size=" + + store.getMemStoreSize().toString() + + " (flush heap lower bound=" + this.flushHeapSizeLowerBound + + " flush off-heap lower bound=" + this.flushOffHeapSizeLowerBound + ")"); } return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java index ed23e3d9d8..e95de9dea4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java @@ -63,7 +63,7 @@ public class FlushNonSloppyStoresFirstPolicy extends FlushLargeStoresPolicy { @Override protected void configureForRegion(HRegion region) { super.configureForRegion(region); - this.flushSizeLowerBound = getFlushSizeLowerBound(region); + setFlushSizeLowerBounds(region); for (HStore store : region.stores.values()) { if (store.isSloppyMemStore()) { sloppyStores.add(store); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 14d6a9def7..e34b5fb2dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -63,7 +63,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; @@ -280,7 +279,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // TODO: account for each registered handler in HeapSize computation private Map coprocessorServiceHandlers = Maps.newHashMap(); - private final AtomicLong memstoreDataSize = new AtomicLong(0);// Track data size in all memstores + // Track data size in all memstores + private final MemStoreSizing memStoreSize = new MemStoreSizing(); private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this); // Debug possible data loss due to WAL off @@ -624,7 +624,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi final WriteState writestate = new WriteState(); - long memstoreFlushSize; + long memStoreFlushHeapSize; + long memStoreFlushOffHeapSize; final long timestampSlop; final long 
rowProcessorTimeout; @@ -636,7 +637,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private long flushCheckInterval; // flushPerChanges is to prevent too many changes in memstore private long flushPerChanges; - private long blockingMemStoreSize; + private long blockingMemStoreHeapSize; + private long blockingMemStoreOffHeapSize; // Used to guard closes final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); @@ -819,10 +821,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); } - this.memstoreFlushSize = flushSize; - this.blockingMemStoreSize = this.memstoreFlushSize * - conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, - HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); + this.memStoreFlushHeapSize = flushSize; + this.memStoreFlushOffHeapSize = flushSize; + long mult = conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, + HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); + this.blockingMemStoreHeapSize = this.memStoreFlushHeapSize * mult; + this.blockingMemStoreOffHeapSize = this.memStoreFlushOffHeapSize * mult; } /** @@ -1184,32 +1188,38 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi /** * Increase the size of mem store in this region and the size of global mem * store - * @return the size of memstore in this region */ - public long addAndGetMemStoreSize(MemStoreSize memstoreSize) { + public void incMemStoreSize(MemStoreSize memStoreSize) { if (this.rsAccounting != null) { - rsAccounting.incGlobalMemStoreSize(memstoreSize); + rsAccounting.incGlobalMemStoreSize(memStoreSize); } - long size = this.memstoreDataSize.addAndGet(memstoreSize.getDataSize()); - checkNegativeMemStoreDataSize(size, memstoreSize.getDataSize()); - return size; + long size; + synchronized (this) { + this.memStoreSize.incMemStoreSize(memStoreSize); + size = this.memStoreSize.getDataSize(); + } + checkNegativeMemStoreDataSize(size, memStoreSize.getDataSize()); } - public void decrMemStoreSize(MemStoreSize memstoreSize) { + public void decrMemStoreSize(MemStoreSize memStoreSize) { if (this.rsAccounting != null) { - rsAccounting.decGlobalMemStoreSize(memstoreSize); + rsAccounting.decGlobalMemStoreSize(memStoreSize); } - long size = this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize()); - checkNegativeMemStoreDataSize(size, -memstoreSize.getDataSize()); + long size; + synchronized (this) { + this.memStoreSize.decMemStoreSize(memStoreSize); + size = this.memStoreSize.getDataSize(); + } + checkNegativeMemStoreDataSize(size, -memStoreSize.getDataSize()); } - private void checkNegativeMemStoreDataSize(long memstoreDataSize, long delta) { - // This is extremely bad if we make memstoreSize negative. Log as much info on the offending + private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) { + // This is extremely bad if we make memStoreSize negative. Log as much info on the offending // caller as possible. (memStoreSize might be a negative value already -- freeing memory) - if (memstoreDataSize < 0) { + if (memStoreDataSize < 0) { LOG.error("Asked to modify this region's (" + this.toString() - + ") memstoreSize to a negative value which is incorrect. Current memstoreSize=" - + (memstoreDataSize - delta) + ", delta=" + delta, new Exception()); + + ") memStoreSize to a negative value which is incorrect. 
Current memStoreSize=" + + (memStoreDataSize - delta) + ", delta=" + delta, new Exception()); } } @@ -1242,8 +1252,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } @Override - public long getMemStoreSize() { - return memstoreDataSize.get(); + public long getMemStoreDataSize() { + return memStoreSize.getDataSize(); } /** @return store services for this region, to access services required by store level needs */ @@ -1524,7 +1534,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi int failedfFlushCount = 0; int flushCount = 0; long tmp = 0; - long remainingSize = this.memstoreDataSize.get(); + long remainingSize = this.memStoreSize.getDataSize(); while (remainingSize > 0) { try { internalFlushcache(status); @@ -1533,7 +1543,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi " (carrying snapshot?) " + this); } flushCount++; - tmp = this.memstoreDataSize.get(); + tmp = this.memStoreSize.getDataSize(); if (tmp >= remainingSize) { failedfFlushCount++; } @@ -1573,7 +1583,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi getRegionServerServices().abort("Assertion failed while closing store " + getRegionInfo().getRegionNameAsString() + " " + store + ". flushableSize expected=0, actual= " + flushableSize - + ". Current memstoreSize=" + getMemStoreSize() + ". Maybe a coprocessor " + + ". Current memStoreSize=" + getMemStoreDataSize() + ". Maybe a coprocessor " + "operation failed and left the memstore in a partially updated state.", null); } } @@ -1616,9 +1626,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.closed.set(true); if (!canFlush) { - this.decrMemStoreSize(new MemStoreSizing(memstoreDataSize.get(), getMemStoreHeapSize())); - } else if (memstoreDataSize.get() != 0) { - LOG.error("Memstore size is " + memstoreDataSize.get()); + this.decrMemStoreSize(new MemStoreSize(memStoreSize)); + } else if (memStoreSize.getDataSize() != 0) { + LOG.error("Memstore data size is " + memStoreSize.getDataSize()); } if (coprocessorHost != null) { status.setStatus("Running coprocessor post-close hooks"); @@ -1639,7 +1649,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } private long getMemStoreHeapSize() { - return stores.values().stream().mapToLong(s -> s.getMemStoreSize().getHeapSize()).sum(); + return memStoreSize.getHeapSize(); + } + + private long getMemStoreOffHeapSize() { + return memStoreSize.getOffHeapSize(); } /** Wait for all current flushes and compactions of the region to complete */ @@ -1755,7 +1769,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * @return True if its worth doing a flush before we put up the close flag. */ private boolean worthPreFlushing() { - return this.memstoreDataSize.get() > + return this.memStoreSize.getDataSize() > this.conf.getLong("hbase.hregion.preclose.flush.size", 1024 * 1024 * 5); } @@ -2373,12 +2387,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // bulk loaded file between memory and existing hfiles. It wants a good seqeunceId that belongs // to no other that it can use to associate with the bulk load. Hence this little dance below // to go get one. - if (this.memstoreDataSize.get() <= 0) { + if (this.memStoreSize.getDataSize() <= 0) { // Take an update lock so no edits can come into memory just yet. 
this.updatesLock.writeLock().lock(); WriteEntry writeEntry = null; try { - if (this.memstoreDataSize.get() <= 0) { + if (this.memStoreSize.getDataSize() <= 0) { // Presume that if there are still no edits in the memstore, then there are no edits for // this region out in the WAL subsystem so no need to do any trickery clearing out // edits in the WAL sub-system. Up the sequence number so the resulting flush id is for @@ -2516,7 +2530,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } } LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() + - " column families, memstore=" + StringUtils.byteDesc(this.memstoreDataSize.get()) + + " column families, memstore=" + StringUtils.byteDesc(this.memStoreSize.getDataSize()) + ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") + ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId)); } @@ -2703,7 +2717,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } long time = EnvironmentEdgeManager.currentTime() - startTime; - long memstoresize = this.memstoreDataSize.get(); + long memstoresize = this.memStoreSize.getDataSize(); String msg = "Finished memstore flush of ~" + StringUtils.byteDesc(prepareResult.totalFlushableSize.getDataSize()) + "/" + prepareResult.totalFlushableSize.getDataSize() + ", currentsize=" @@ -3041,7 +3055,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return true; }); // update memStore size - region.addAndGetMemStoreSize(memStoreAccounting); + region.incMemStoreSize(memStoreAccounting); } public boolean isDone() { @@ -3795,8 +3809,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi initialized = true; } doMiniBatchMutate(batchOp); - long newSize = this.getMemStoreSize(); - requestFlushIfNeeded(newSize); + requestFlushIfNeeded(); } } finally { batchOp.closeRegionOperation(); @@ -4156,7 +4169,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // If catalog region, do not impose resource constraints or block updates. if (this.getRegionInfo().isMetaRegion()) return; - if (this.memstoreDataSize.get() > this.blockingMemStoreSize) { + if (this.memStoreSize.getHeapSize() > this.blockingMemStoreHeapSize + || this.memStoreSize.getOffHeapSize() > this.blockingMemStoreOffHeapSize) { blockedRequestsCount.increment(); requestFlush(); throw new RegionTooBusyException("Above memstore limit, " + @@ -4164,8 +4178,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.getRegionInfo().getRegionNameAsString()) + ", server=" + (this.getRegionServerServices() == null ? 
"unknown" : this.getRegionServerServices().getServerName()) + - ", memstoreSize=" + memstoreDataSize.get() + - ", blockingMemStoreSize=" + blockingMemStoreSize); + ", memStoreSize=" + memStoreSize + + ", blockingMemStoreHeapSize=" + blockingMemStoreHeapSize + + ", blockingMemStoreOffHeapSize=" + blockingMemStoreOffHeapSize + ); } } @@ -4283,8 +4299,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * @param size * @return True if size is over the flush threshold */ - private boolean isFlushSize(final long size) { - return size > this.memstoreFlushSize; + private boolean isFlushSize(MemStoreSize size) { + + return size.getHeapSize() > getMemStoreFlushHeapSize() + || size.getOffHeapSize() > getMemStoreFlushOffHeapSize(); } /** @@ -4575,7 +4593,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi rsAccounting.addRegionReplayEditsSize(getRegionInfo().getRegionName(), memstoreSize); } - flush = isFlushSize(this.addAndGetMemStoreSize(memstoreSize)); + incMemStoreSize(memstoreSize); + flush = isFlushSize(this.memStoreSize); if (flush) { internalFlushcache(null, currentEditSeqId, stores.values(), status, false, FlushLifeCycleTracker.DUMMY); @@ -6481,7 +6500,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi scannerContext.incrementBatchProgress(results.size()); for (Cell cell : results) { scannerContext.incrementSizeProgress(PrivateCellUtil.estimatedSerializedSizeOf(cell), - PrivateCellUtil.estimatedHeapSizeOf(cell)); + PrivateCellUtil.estimatedSizeOfCell(cell)); } } @@ -7188,8 +7207,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return null; } ClientProtos.RegionLoadStats.Builder stats = ClientProtos.RegionLoadStats.newBuilder(); - stats.setMemStoreLoad((int) (Math.min(100, (this.memstoreDataSize.get() * 100) / this - .memstoreFlushSize))); + stats.setMemStoreLoad((int) (Math.min(100, (this.memStoreSize.getDataSize() * 100) / this + .memStoreFlushHeapSize))); if (rsServices.getHeapMemoryManager() != null) { // the HeapMemoryManager uses -0.0 to signal a problem asking the JVM, // so we could just do the calculation below and we'll get a 0. @@ -7331,8 +7350,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } finally { closeRegionOperation(); if (!mutations.isEmpty()) { - long newSize = this.addAndGetMemStoreSize(memstoreAccounting); - requestFlushIfNeeded(newSize); + this.incMemStoreSize(memstoreAccounting); + requestFlushIfNeeded(); } } } @@ -7485,7 +7504,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi rowLock.release(); } // Request a cache flush if over the limit. Do it outside update lock. 
- if (isFlushSize(addAndGetMemStoreSize(memstoreAccounting))) { + incMemStoreSize(memstoreAccounting); + if (isFlushSize(memStoreSize)) { requestFlush(); } closeRegionOperation(op); @@ -7787,7 +7807,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi ClassSize.OBJECT + ClassSize.ARRAY + 50 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + - (14 * Bytes.SIZEOF_LONG) + + (16 * Bytes.SIZEOF_LONG) + 3 * Bytes.SIZEOF_BOOLEAN); // woefully out of date - currently missing: @@ -7803,7 +7823,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public static final long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.OBJECT + // closeLock (2 * ClassSize.ATOMIC_BOOLEAN) + // closed, closing - (4 * ClassSize.ATOMIC_LONG) + // memStoreSize, numPutsWithoutWAL, dataInMemoryWithoutWAL, + (3 * ClassSize.ATOMIC_LONG) + // numPutsWithoutWAL, dataInMemoryWithoutWAL, // compactionsFailed (2 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints WriteState.HEAP_SIZE + // writestate @@ -8305,8 +8325,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi : CellComparatorImpl.COMPARATOR; } - public long getMemStoreFlushSize() { - return this.memstoreFlushSize; + public long getMemStoreFlushHeapSize() { + return this.memStoreFlushHeapSize; + } + + public long getMemStoreFlushOffHeapSize() { + return this.memStoreFlushOffHeapSize; } //// method for debugging tests @@ -8324,7 +8348,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } buf.append("end-of-stores"); buf.append(", memstore size "); - buf.append(getMemStoreSize()); + buf.append(getMemStoreDataSize()); if (getRegionInfo().getRegionNameAsString().startsWith(regionName)) { throw new RuntimeException(buf.toString()); } @@ -8355,8 +8379,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi RpcServer.getRequestUser().orElse(null)); } - private void requestFlushIfNeeded(long memstoreTotalSize) throws RegionTooBusyException { - if (memstoreTotalSize > this.getMemStoreFlushSize()) { + private void requestFlushIfNeeded() throws RegionTooBusyException { + if (getMemStoreHeapSize() > this.getMemStoreFlushHeapSize() + || getMemStoreOffHeapSize() > this.getMemStoreFlushOffHeapSize()) { requestFlush(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 4c34fe0e0a..7d70ff6fa9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1611,7 +1611,7 @@ public class HRegionServer extends HasThread implements int storefiles = 0; int storeUncompressedSizeMB = 0; int storefileSizeMB = 0; - int memstoreSizeMB = (int) (r.getMemStoreSize() / 1024 / 1024); + int memstoreSizeMB = (int) (r.getMemStoreDataSize() / 1024 / 1024); long storefileIndexSizeKB = 0; int rootIndexSizeKB = 0; int totalStaticIndexSizeKB = 0; @@ -2793,7 +2793,7 @@ public class HRegionServer extends HasThread implements }); // Copy over all regions. Regions are sorted by size with biggest first. 
for (HRegion region : this.onlineRegions.values()) { - sortedRegions.put(region.getMemStoreSize(), region); + sortedRegions.put(region.getMemStoreDataSize(), region); } return sortedRegions; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 2b23598629..a23b1e3b70 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -399,7 +399,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat @Override public long getMemStoreFlushSize() { // TODO: Why is this in here? The flushsize of the region rather than the store? St.Ack - return this.region.memstoreFlushSize; + return this.region.memStoreFlushHeapSize; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java index b2c91300f1..e0969b554e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableMemStoreLAB.java @@ -117,4 +117,16 @@ public class ImmutableMemStoreLAB implements MemStoreLAB { checkAndCloseMSLABs(count); } } + + @Override + public boolean isOnHeap() { + return !isOffHeap(); + } + + @Override + public boolean isOffHeap() { + return ChunkCreator.getInstance().isOffheap(); + } + + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index a314848cab..abcfd370a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -148,7 +148,7 @@ class MemStoreFlusher implements FlushRequester { HRegion regionToFlush; if (bestFlushableRegion != null && - bestAnyRegion.getMemStoreSize() > 2 * bestFlushableRegion.getMemStoreSize()) { + bestAnyRegion.getMemStoreDataSize() > 2 * bestFlushableRegion.getMemStoreDataSize()) { // Even if it's not supposed to be flushed, pick a region if it's more than twice // as big as the best flushable one - otherwise when we're under pressure we make // lots of little flushes and cause lots of compactions, etc, which just makes @@ -157,9 +157,9 @@ class MemStoreFlusher implements FlushRequester { LOG.debug("Under global heap pressure: " + "Region " + bestAnyRegion.getRegionInfo().getRegionNameAsString() + " has too many " + "store files, but is " - + TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemStoreSize(), "", 1) + + TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemStoreDataSize(), "", 1) + " vs best flushable region's " - + TraditionalBinaryPrefix.long2String(bestFlushableRegion.getMemStoreSize(), "", 1) + + TraditionalBinaryPrefix.long2String(bestFlushableRegion.getMemStoreDataSize(), "", 1) + ". 
Choosing the bigger."); } regionToFlush = bestAnyRegion; @@ -172,14 +172,14 @@ class MemStoreFlusher implements FlushRequester { } Preconditions.checkState( - (regionToFlush != null && regionToFlush.getMemStoreSize() > 0) || - (bestRegionReplica != null && bestRegionReplica.getMemStoreSize() > 0)); + (regionToFlush != null && regionToFlush.getMemStoreDataSize() > 0) || + (bestRegionReplica != null && bestRegionReplica.getMemStoreDataSize() > 0)); if (regionToFlush == null || (bestRegionReplica != null && ServerRegionReplicaUtil.isRegionReplicaStoreFileRefreshEnabled(conf) && - (bestRegionReplica.getMemStoreSize() - > secondaryMultiplier * regionToFlush.getMemStoreSize()))) { + (bestRegionReplica.getMemStoreDataSize() + > secondaryMultiplier * regionToFlush.getMemStoreDataSize()))) { LOG.info("Refreshing storefiles of region " + bestRegionReplica + " due to global heap pressure. Total memstore datasize=" + TraditionalBinaryPrefix.long2String( @@ -198,7 +198,7 @@ class MemStoreFlusher implements FlushRequester { TraditionalBinaryPrefix.long2String( server.getRegionServerAccounting().getGlobalMemStoreDataSize(), "", 1) + ", Region memstore size=" + - TraditionalBinaryPrefix.long2String(regionToFlush.getMemStoreSize(), "", 1)); + TraditionalBinaryPrefix.long2String(regionToFlush.getMemStoreDataSize(), "", 1)); flushedOne = flushRegion(regionToFlush, true, false, FlushLifeCycleTracker.DUMMY); if (!flushedOne) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java index 1982b4fb4e..4126a7e426 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java @@ -103,7 +103,7 @@ public interface MemStoreLAB { */ Chunk getNewExternalJumboChunk(int size); - public static MemStoreLAB newInstance(Configuration conf) { + static MemStoreLAB newInstance(Configuration conf) { MemStoreLAB memStoreLAB = null; if (isEnabled(conf)) { String className = conf.get(MSLAB_CLASS_NAME, MemStoreLABImpl.class.getName()); @@ -113,7 +113,11 @@ public interface MemStoreLAB { return memStoreLAB; } - public static boolean isEnabled(Configuration conf) { + static boolean isEnabled(Configuration conf) { return conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT); } + + boolean isOnHeap(); + + boolean isOffHeap(); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java index 08588d26e8..89367c04fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLABImpl.java @@ -291,6 +291,16 @@ public class MemStoreLABImpl implements MemStoreLAB { return c; } + @Override + public boolean isOnHeap() { + return !isOffHeap(); + } + + @Override + public boolean isOffHeap() { + return this.chunkCreator.isOffheap(); + } + @VisibleForTesting Chunk getCurrentChunk() { return this.curChunk.get(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSize.java index 557a61a49c..382e6e9069 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSize.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSize.java @@ -27,29 +27,58 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) public class MemStoreSize { + // MemStore size tracks 3 sizes: + // (1) data size: the aggregated size of all key-value not including meta data such as + // index, time range etc. + // (2) heap size: the aggregated size of all data that is allocated on-heap including all + // key-values that reside on-heap and the metadata that resides on-heap + // (3) off-heap size: the aggregated size of all data that is allocated off-heap including all + // key-values that reside off-heap and the metadata that resides off-heap + // + // 3 examples to illustrate their usage: + // Consider a store with 100MB of key-values allocated on-heap and 20MB of metadata allocated + // on-heap. The counters are <100MB, 120MB, 0>, respectively. + // Consider a store with 100MB of key-values allocated off-heap and 20MB of metadata + // allocated on-heap (e.g, CAM index). The counters are <100MB, 20MB, 100MB>, respectively. + // Consider a store with 100MB of key-values from which 95MB are allocated off-heap and 5MB + // are allocated on-heap (e.g., due to upserts) and 20MB of metadata from which 15MB allocated + // off-heap (e.g, CCM index) and 5MB allocated on-heap (e.g, CSLM index in active). + // The counters are <100MB, 10MB, 110MB>, respectively. + /** *'dataSize' tracks the Cell's data bytes size alone (Key bytes, value bytes). A cell's data can * be in on heap or off heap area depending on the MSLAB and its configuration to be using on heap * or off heap LABs */ - protected long dataSize; + protected volatile long dataSize; /** 'heapSize' tracks all Cell's heap size occupancy. This will include Cell POJO heap overhead. * When Cells in on heap area, this will include the cells data size as well. 
*/ - protected long heapSize; + protected volatile long heapSize; + + /** off-heap size: the aggregated size of all data that is allocated off-heap including all + * key-values that reside off-heap and the metadata that resides off-heap + */ + protected volatile long offHeapSize; public MemStoreSize() { - this(0L, 0L); + this(0L, 0L, 0L); } - public MemStoreSize(long dataSize, long heapSize) { + public MemStoreSize(long dataSize, long heapSize, long offHeapSize) { this.dataSize = dataSize; this.heapSize = heapSize; + this.offHeapSize = offHeapSize; } + protected MemStoreSize(MemStoreSize memStoreSize) { + this.dataSize = memStoreSize.dataSize; + this.heapSize = memStoreSize.heapSize; + this.offHeapSize = memStoreSize.offHeapSize; + } public boolean isEmpty() { - return this.dataSize == 0 && this.heapSize == 0; + return this.dataSize == 0 && this.heapSize == 0 && this.offHeapSize == 0; } public long getDataSize() { @@ -60,24 +89,33 @@ public class MemStoreSize { return this.heapSize; } + public long getOffHeapSize() { + return this.offHeapSize; + } + @Override public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) { return false; } MemStoreSize other = (MemStoreSize) obj; - return this.dataSize == other.dataSize && this.heapSize == other.heapSize; + return this.dataSize == other.dataSize + && this.heapSize == other.heapSize + && this.offHeapSize == other.offHeapSize; } @Override public int hashCode() { long h = 13 * this.dataSize; h = h + 14 * this.heapSize; + h = h + 15 * this.offHeapSize; return (int) h; } @Override public String toString() { - return "dataSize=" + this.dataSize + " , heapSize=" + this.heapSize; + return "dataSize=" + this.dataSize + + " , heapSize=" + this.heapSize + + " , offHeapSize=" + this.offHeapSize; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java index b13201d4a2..6c0b0f6e3b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java @@ -28,23 +28,14 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MemStoreSizing extends MemStoreSize { public static final MemStoreSizing DUD = new MemStoreSizing() { - @Override - public void incMemStoreSize(MemStoreSize delta) { - incMemStoreSize(delta.getDataSize(), delta.getHeapSize()); - } - @Override - public void incMemStoreSize(long dataSizeDelta, long heapSizeDelta) { + @Override public void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, + long offHeapSizeDelta) { throw new RuntimeException("I'm a dud, you can't use me!"); } - @Override - public void decMemStoreSize(MemStoreSize delta) { - decMemStoreSize(delta.getDataSize(), delta.getHeapSize()); - } - - @Override - public void decMemStoreSize(long dataSizeDelta, long heapSizeDelta) { + @Override public void decMemStoreSize(long dataSizeDelta, long heapSizeDelta, + long offHeapSizeDelta) { throw new RuntimeException("I'm a dud, you can't use me!"); } }; @@ -53,51 +44,38 @@ public class MemStoreSizing extends MemStoreSize { super(); } - public MemStoreSizing(long dataSize, long heapSize) { - super(dataSize, heapSize); + public MemStoreSizing(long dataSize, long heapSize, long offHeapSize) { + super(dataSize, heapSize, offHeapSize); } - public void incMemStoreSize(long dataSizeDelta, long heapSizeDelta) { + public 
MemStoreSizing(MemStoreSizing memStoreSizing) { + super(memStoreSizing); + } + + public void incMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) { this.dataSize += dataSizeDelta; this.heapSize += heapSizeDelta; + this.offHeapSize += offHeapSizeDelta; } public void incMemStoreSize(MemStoreSize delta) { - incMemStoreSize(delta.getDataSize(), delta.getHeapSize()); + incMemStoreSize(delta.getDataSize(), delta.getHeapSize(), delta.getOffHeapSize()); } - public void decMemStoreSize(long dataSizeDelta, long heapSizeDelta) { + public void decMemStoreSize(long dataSizeDelta, long heapSizeDelta, long offHeapSizeDelta) { this.dataSize -= dataSizeDelta; this.heapSize -= heapSizeDelta; + this.offHeapSize -= offHeapSizeDelta; } public void decMemStoreSize(MemStoreSize delta) { - decMemStoreSize(delta.getDataSize(), delta.getHeapSize()); + decMemStoreSize(delta.getDataSize(), delta.getHeapSize(), delta.getOffHeapSize()); } public void empty() { this.dataSize = 0L; this.heapSize = 0L; + this.offHeapSize = 0L; } - @Override - public boolean equals(Object obj) { - if (obj == null || (getClass() != obj.getClass())) { - return false; - } - MemStoreSizing other = (MemStoreSizing) obj; - return this.dataSize == other.dataSize && this.heapSize == other.heapSize; - } - - @Override - public int hashCode() { - long h = 13 * this.dataSize; - h = h + 14 * this.heapSize; - return (int) h; - } - - @Override - public String toString() { - return "dataSize=" + this.dataSize + " , heapSize=" + this.heapSize; - } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java index 10656fefa8..53e5fa87b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java @@ -72,7 +72,7 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr for (Store store : r.getStores()) { tempStorefilesSize += store.getStorefilesSize(); } - metricsTable.setMemStoresSize(metricsTable.getMemStoresSize() + r.getMemStoreSize()); + metricsTable.setMemStoresSize(metricsTable.getMemStoresSize() + r.getMemStoreDataSize()); metricsTable.setStoreFilesSize(metricsTable.getStoreFilesSize() + tempStorefilesSize); metricsTable.setTableSize(metricsTable.getMemStoresSize() + metricsTable.getStoreFilesSize()); metricsTable.setReadRequestsCount(metricsTable.getReadRequestsCount() + r.getReadRequestsCount()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java index 55ed993dc6..f8a54d3a1b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java @@ -44,7 +44,7 @@ public class MutableSegment extends Segment { protected MutableSegment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB) { super(cellSet, comparator, memStoreLAB, TimeRangeTracker.create(TimeRangeTracker.Type.SYNC)); - incSize(0, DEEP_OVERHEAD, 0); // update the mutable segment metadata } /** @@ -88,9 +88,10 @@ public class MutableSegment extends Segment { // removed
cell is from MSLAB or not. Will do once HBASE-16438 is in int cellLen = getCellLength(cur); long heapSize = heapSizeChange(cur, true); - this.incSize(-cellLen, -heapSize); + long offHeapSize = offHeapSizeChange(cur, true); + this.incSize(-cellLen, -heapSize, -offHeapSize); if (memStoreSizing != null) { - memStoreSizing.decMemStoreSize(cellLen, heapSize); + memStoreSizing.decMemStoreSize(cellLen, heapSize, offHeapSize); } it.remove(); } else { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 75f02a363c..a8eaddd447 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -150,7 +150,7 @@ public interface Region extends ConfigurationObserver { * the memstores of this Region. Means size in bytes for key, value and tags within Cells. * It wont consider any java heap overhead for the cell objects or any other. */ - long getMemStoreSize(); + long getMemStoreDataSize(); /** @return the number of mutations processed bypassing the WAL */ long getNumMutationsWithoutWAL(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java index 61815a540d..23c3a79597 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java @@ -66,7 +66,7 @@ public class RegionServicesForStores { } public void addMemStoreSize(MemStoreSize size) { - region.addAndGetMemStoreSize(size); + region.incMemStoreSize(size); } public RegionInfo getRegionInfo() { @@ -80,7 +80,7 @@ public class RegionServicesForStores { public ThreadPoolExecutor getInMemoryCompactionPool() { return INMEMORY_COMPACTION_POOL; } public long getMemStoreFlushSize() { - return region.getMemStoreFlushSize(); + return region.getMemStoreFlushHeapSize(); } public int getNumStores() { @@ -89,6 +89,6 @@ public class RegionServicesForStores { @VisibleForTesting long getMemStoreSize() { - return region.getMemStoreSize(); + return region.getMemStoreDataSize(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index c054666395..dbd3de72cd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -22,7 +22,6 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.SortedSet; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.logging.Log; @@ -49,8 +48,8 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe public abstract class Segment { public final static long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT - + 6 * ClassSize.REFERENCE // cellSet, comparator, memStoreLAB, dataSize, - // heapSize, and timeRangeTracker + + 5 * ClassSize.REFERENCE // cellSet, comparator, memStoreLAB, memStoreSizing, + // and timeRangeTracker + Bytes.SIZEOF_LONG // minSequenceId + Bytes.SIZEOF_BOOLEAN); // tagsPresent public final static long DEEP_OVERHEAD = FIXED_OVERHEAD + 
ClassSize.ATOMIC_REFERENCE @@ -62,8 +61,9 @@ public abstract class Segment { private MemStoreLAB memStoreLAB; // Sum of sizes of all Cells added to this Segment. Cell's heapSize is considered. This is not // including the heap overhead of this class. - protected final AtomicLong dataSize; - protected final AtomicLong heapSize; + protected final MemStoreSizing segmentSize; +// protected final AtomicLong dataSize; +// protected final AtomicLong heapSize; protected final TimeRangeTracker timeRangeTracker; protected volatile boolean tagsPresent; @@ -71,8 +71,9 @@ public abstract class Segment { // and there is no need in true Segments state protected Segment(CellComparator comparator, TimeRangeTracker trt) { this.comparator = comparator; - this.dataSize = new AtomicLong(0); - this.heapSize = new AtomicLong(0); + this.segmentSize = new MemStoreSizing(); +// this.dataSize = new AtomicLong(0); +// this.heapSize = new AtomicLong(0); this.timeRangeTracker = trt; } @@ -82,8 +83,9 @@ public abstract class Segment { this.comparator = comparator; this.minSequenceId = Long.MAX_VALUE; this.memStoreLAB = memStoreLAB; - this.dataSize = new AtomicLong(0); - this.heapSize = new AtomicLong(0); + this.segmentSize = new MemStoreSizing(); +// this.dataSize = new AtomicLong(0); +// this.heapSize = new AtomicLong(0); this.tagsPresent = false; this.timeRangeTracker = trt; } @@ -93,8 +95,9 @@ public abstract class Segment { this.comparator = segment.getComparator(); this.minSequenceId = segment.getMinSequenceId(); this.memStoreLAB = segment.getMemStoreLAB(); - this.dataSize = new AtomicLong(segment.keySize()); - this.heapSize = new AtomicLong(segment.heapSize.get()); + this.segmentSize = new MemStoreSizing(segment.getMemStoreSizing()); +// this.dataSize = new AtomicLong(segment.keySize()); +// this.heapSize = new AtomicLong(segment.heapSize.get()); this.tagsPresent = segment.isTagsPresent(); this.timeRangeTracker = segment.getTimeRangeTracker(); } @@ -134,17 +137,6 @@ public abstract class Segment { } /** - * @return the first cell in the segment that has equal or greater key than the given cell - */ - public Cell getFirstAfter(Cell cell) { - SortedSet snTailSet = tailSet(cell); - if (!snTailSet.isEmpty()) { - return snTailSet.first(); - } - return null; - } - - /** * Closing a segment before it is being discarded */ public void close() { @@ -210,27 +202,39 @@ public abstract class Segment { return this; } + public MemStoreSizing getMemStoreSizing() { + return this.segmentSize; + } + /** * @return Sum of all cell's size. */ public long keySize() { - return this.dataSize.get(); + return this.segmentSize.getDataSize(); } /** * @return The heap size of this segment. */ public long heapSize() { - return this.heapSize.get(); + return this.segmentSize.getHeapSize(); + } + + /** + * @return The off-heap size of this segment. 
+ */ + public long offHeapSize() { + return this.segmentSize.getOffHeapSize(); } /** * Updates the size counters of the segment by the given delta */ //TODO - protected void incSize(long delta, long heapOverhead) { - this.dataSize.addAndGet(delta); - this.heapSize.addAndGet(heapOverhead); + protected void incSize(long delta, long heapOverhead, long offHeapOverhead) { + synchronized (this) { + this.segmentSize.incMemStoreSize(delta, heapOverhead, offHeapOverhead); + } } public long getMinSequenceId() { @@ -292,9 +296,10 @@ public abstract class Segment { cellSize = getCellLength(cellToAdd); } long heapSize = heapSizeChange(cellToAdd, succ); - incSize(cellSize, heapSize); + long offHeapSize = offHeapSizeChange(cellToAdd, succ); + incSize(cellSize, heapSize, offHeapSize); if (memstoreSizing != null) { - memstoreSizing.incMemStoreSize(cellSize, heapSize); + memstoreSizing.incMemStoreSize(cellSize, heapSize, offHeapSize); } getTimeRangeTracker().includeTimestamp(cellToAdd); minSequenceId = Math.min(minSequenceId, cellToAdd.getSequenceId()); @@ -316,10 +321,48 @@ public abstract class Segment { * heap size itself and additional overhead because of addition on to CSLM. */ protected long heapSizeChange(Cell cell, boolean succ) { + long res = 0; + if (succ) { + boolean onHeap = true; + MemStoreLAB memStoreLAB = getMemStoreLAB(); + if(memStoreLAB != null) { + onHeap = memStoreLAB.isOnHeap(); + } + res += indexEntryOnHeapSize(onHeap); + if(onHeap) { + res += PrivateCellUtil.estimatedSizeOfCell(cell); + } + res = ClassSize.align(res); + } + return res; + } + + protected long offHeapSizeChange(Cell cell, boolean succ) { + long res = 0; if (succ) { - return ClassSize - .align(indexEntrySize() + PrivateCellUtil.estimatedHeapSizeOf(cell)); + boolean offHeap = false; + MemStoreLAB memStoreLAB = getMemStoreLAB(); + if(memStoreLAB != null) { + offHeap = memStoreLAB.isOffHeap(); + } + res += indexEntryOffHeapSize(offHeap); + if(offHeap) { + res += PrivateCellUtil.estimatedSizeOfCell(cell); + } + res = ClassSize.align(res); } + return res; + } + + protected long indexEntryOnHeapSize(boolean onHeap) { + // in most cases index is allocated on-heap + // override this method when it is not always the case, e.g., in CCM + return indexEntrySize(); + } + + protected long indexEntryOffHeapSize(boolean offHeap) { + // in most cases index is allocated on-heap + // override this method when it is not always the case, e.g., in CCM return 0; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java index 6abca134d7..663d52e057 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java @@ -601,7 +601,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner // Update the progress of the scanner context scannerContext.incrementSizeProgress(cellSize, - PrivateCellUtil.estimatedHeapSizeOf(cell)); + PrivateCellUtil.estimatedSizeOfCell(cell)); scannerContext.incrementBatchProgress(1); if (matcher.isUserScan() && totalBytesRead > maxRowSize) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java index 2feb3565a9..2b0a1bd644 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALEdit.java 
@@ -167,7 +167,7 @@ public class WALEdit implements HeapSize { public long heapSize() { long ret = ClassSize.ARRAYLIST; for (Cell cell : cells) { - ret += PrivateCellUtil.estimatedHeapSizeOf(cell); + ret += PrivateCellUtil.estimatedSizeOfCell(cell); } return ret; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java index 34e8c3c00d..142095be10 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java @@ -95,7 +95,7 @@ public class TestGlobalMemStoreSize { long globalMemStoreSize = 0; for (RegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) { - globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemStoreSize(); + globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemStoreDataSize(); } assertEquals(server.getRegionServerAccounting().getGlobalMemStoreDataSize(), globalMemStoreSize); @@ -126,7 +126,7 @@ public class TestGlobalMemStoreSize { for (RegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) { HRegion r = server.getRegion(regionInfo.getEncodedName()); - long l = r.getMemStoreSize(); + long l = r.getMemStoreDataSize(); if (l > 0) { // Only meta could have edits at this stage. Give it another flush // clear them. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java index 3b2b6fed90..d4951f4196 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java @@ -386,7 +386,7 @@ public class TestPartialResultsFromClientSide { // Estimate the cell heap size. One difference is that on server side, the KV Heap size is // estimated differently in case the cell is backed up by MSLAB byte[] (no overhead for // backing array). Thus below calculation is a bit brittle. 
- CELL_HEAP_SIZE = PrivateCellUtil.estimatedHeapSizeOf(result.rawCells()[0]) + CELL_HEAP_SIZE = PrivateCellUtil.estimatedSizeOfCell(result.rawCells()[0]) - (ClassSize.ARRAY+3); if (LOG.isInfoEnabled()) LOG.info("Cell heap size: " + CELL_HEAP_SIZE); scanner.close(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java index 26398210e7..a918589fb4 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerSideScanMetricsFromClientSide.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics; import org.apache.hadoop.hbase.filter.BinaryComparator; import org.apache.hadoop.hbase.filter.ColumnPrefixFilter; -import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.FilterList.Operator; @@ -144,7 +143,7 @@ public class TestServerSideScanMetricsFromClientSide { assertTrue(result.rawCells() != null); assertTrue(result.rawCells().length == 1); - CELL_HEAP_SIZE = PrivateCellUtil.estimatedHeapSizeOf(result.rawCells()[0]); + CELL_HEAP_SIZE = PrivateCellUtil.estimatedSizeOfCell(result.rawCells()[0]); scanner.close(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index 1e3af40d54..e04b3a7715 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -227,30 +227,30 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-1"))) .join(); - Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0); // flush region and wait flush operation finished. LOG.info("flushing region: " + Bytes.toStringBinary(hri.getRegionName())); admin.flushRegion(hri.getRegionName()).get(); LOG.info("blocking until flush is complete: " + Bytes.toStringBinary(hri.getRegionName())); Threads.sleepWithoutInterrupt(500); - while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) { + while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0) { Threads.sleep(50); } // check the memstore. 
- Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0); + Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize(), 0); // write another put into the specific region ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-2"))) .join(); - Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0); admin.flush(tableName).get(); Threads.sleepWithoutInterrupt(500); - while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) { + while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0) { Threads.sleep(50); } // check the memstore. - Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0); + Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize(), 0); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index 12c7faeb39..1089549ce9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -105,7 +105,7 @@ public class TestClientPushback { mutator.flush(); // get the current load on RS. Hopefully memstore isn't flushed since we wrote the the data - int load = (int) ((((HRegion) region).addAndGetMemStoreSize(new MemStoreSize(0, 0)) * 100) + int load = (int) ((((HRegion) region).getMemStoreDataSize() * 100) / flushSizeBytes); LOG.debug("Done writing some data to "+tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java index 30b3d71a85..8880a4b221 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java @@ -97,7 +97,7 @@ public class TestNegativeMemStoreSizeWithSlowCoprocessor { if (Bytes.equals(put.getRow(), Bytes.toBytes("row2"))) { region.flush(false); - Assert.assertTrue(region.addAndGetMemStoreSize(new MemStoreSize()) >= 0); + Assert.assertTrue(region.getMemStoreDataSize() >= 0); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 0f18deeab4..2766b78109 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -693,7 +693,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 4 cells. 
int oneCellOnCSLMHeapSize = 120; int oneCellOnCAHeapSize = 88; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize; assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact @@ -774,7 +774,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 3 cells. int oneCellOnCSLMHeapSize = 120; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 3 * oneCellOnCSLMHeapSize; assertEquals(totalHeapSize, memstore.heapSize()); @@ -832,7 +832,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { LOG.debug("added kv: " + kv.getKeyString() + ", timestamp:" + kv.getTimestamp()); } regionServicesForStores.addMemStoreSize(new MemStoreSize(hmc.getActive().keySize() - size, - hmc.getActive().heapSize() - heapOverhead)); + hmc.getActive().heapSize() - heapOverhead, 0)); return totalLen; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java index b4cf4ec342..131087cc17 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java @@ -210,7 +210,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore long cellBeforeFlushSize = cellBeforeFlushSize(); long cellAfterFlushSize = cellAfterFlushSize(); long totalHeapSize1 = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize()); MemStoreSize size = memstore.getFlushableSize(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index fa4d7f0672..9df84ae1dc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -315,7 +315,7 @@ public class TestEndToEndSplitTransaction { admin.flushRegion(regionName); log("blocking until flush is complete: " + Bytes.toStringBinary(regionName)); Threads.sleepWithoutInterrupt(500); - while (rs.getOnlineRegion(regionName).getMemStoreSize() > 0) { + while (rs.getOnlineRegion(regionName).getMemStoreDataSize() > 0) { Threads.sleep(50); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index d538b15bbe..64605210d8 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -305,7 +305,7 @@ public class TestHRegion { region.put(put); // Close with something in memstore and 
something in the snapshot. Make sure all is cleared. region.close(); - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -389,17 +389,17 @@ public class TestHRegion { HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); HStore store = region.getStore(COLUMN_FAMILY_BYTES); - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); // Put some value and make sure flush could be completed normally byte [] value = Bytes.toBytes(method); Put put = new Put(value); put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value); region.put(put); - long onePutSize = region.getMemStoreSize(); + long onePutSize = region.getMemStoreDataSize(); assertTrue(onePutSize > 0); region.flush(true); - assertEquals("memstoreSize should be zero", 0, region.getMemStoreSize()); + assertEquals("memstoreSize should be zero", 0, region.getMemStoreDataSize()); assertEquals("flushable size should be zero", 0, store.getFlushableSize().getDataSize()); // save normalCPHost and replaced by mockedCPHost, which will cancel flush requests @@ -412,14 +412,14 @@ public class TestHRegion { region.setCoprocessorHost(mockedCPHost); region.put(put); region.flush(true); - assertEquals("memstoreSize should NOT be zero", onePutSize, region.getMemStoreSize()); + assertEquals("memstoreSize should NOT be zero", onePutSize, region.getMemStoreDataSize()); assertEquals("flushable size should NOT be zero", onePutSize, store.getFlushableSize().getDataSize()); // set normalCPHost and flush again, the snapshot will be flushed region.setCoprocessorHost(normalCPHost); region.flush(true); - assertEquals("memstoreSize should be zero", 0, region.getMemStoreSize()); + assertEquals("memstoreSize should be zero", 0, region.getMemStoreDataSize()); assertEquals("flushable size should be zero", 0, store.getFlushableSize().getDataSize()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -433,14 +433,14 @@ public class TestHRegion { HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); HStore store = region.getStore(COLUMN_FAMILY_BYTES); - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); // Put one value byte [] value = Bytes.toBytes(method); Put put = new Put(value); put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value); region.put(put); - long onePutSize = region.getMemStoreSize(); + long onePutSize = region.getMemStoreDataSize(); assertTrue(onePutSize > 0); RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class); @@ -456,7 +456,7 @@ public class TestHRegion { } catch (IOException expected) { } long expectedSize = onePutSize * 2; - assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreSize()); + assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreDataSize()); assertEquals("flushable size should be incremented", expectedSize, store.getFlushableSize().getDataSize()); @@ -501,13 +501,13 @@ public class TestHRegion { // Initialize region region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - long size = region.getMemStoreSize(); + long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); // Put one item into memstore. Measure the size of one item in memstore. 
Put p1 = new Put(row); p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null)); region.put(p1); - final long sizeOfOnePut = region.getMemStoreSize(); + final long sizeOfOnePut = region.getMemStoreDataSize(); // Fail a flush which means the current memstore will hang out as memstore 'snapshot'. try { LOG.info("Flushing"); @@ -520,7 +520,7 @@ public class TestHRegion { // Make it so all writes succeed from here on out ffs.fault.set(false); // Check sizes. Should still be the one entry. - Assert.assertEquals(sizeOfOnePut, region.getMemStoreSize()); + Assert.assertEquals(sizeOfOnePut, region.getMemStoreDataSize()); // Now add two entries so that on this next flush that fails, we can see if we // subtract the right amount, the snapshot size only. Put p2 = new Put(row); @@ -528,13 +528,13 @@ public class TestHRegion { p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null)); region.put(p2); long expectedSize = sizeOfOnePut * 3; - Assert.assertEquals(expectedSize, region.getMemStoreSize()); + Assert.assertEquals(expectedSize, region.getMemStoreDataSize()); // Do a successful flush. It will clear the snapshot only. Thats how flushes work. // If already a snapshot, we clear it else we move the memstore to be snapshot and flush // it region.flush(true); // Make sure our memory accounting is right. - Assert.assertEquals(sizeOfOnePut * 2, region.getMemStoreSize()); + Assert.assertEquals(sizeOfOnePut * 2, region.getMemStoreDataSize()); } finally { HBaseTestingUtility.closeRegionAndWAL(region); } @@ -566,7 +566,7 @@ public class TestHRegion { // Initialize region region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - long size = region.getMemStoreSize(); + long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); // Put one item into memstore. Measure the size of one item in memstore. 
Put p1 = new Put(row); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index bab5b2673b..fb853a91f9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -362,7 +362,7 @@ public class TestHRegionReplayEvents { verifyData(secondaryRegion, 0, lastReplayed, cq, families); HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1")); long storeMemstoreSize = store.getMemStoreSize().getHeapSize(); - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); long storeFlushableSize = store.getFlushableSize().getHeapSize(); long storeSize = store.getSize(); long storeSizeUncompressed = store.getStoreSizeUncompressed(); @@ -391,7 +391,7 @@ public class TestHRegionReplayEvents { assertTrue(storeFlushableSize > newFlushableSize); // assert that the region memstore is smaller now - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(regionMemstoreSize > newRegionMemstoreSize); // assert that the store sizes are bigger @@ -461,7 +461,7 @@ public class TestHRegionReplayEvents { // first verify that everything is replayed and visible before flush event replay HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1")); long storeMemstoreSize = store.getMemStoreSize().getHeapSize(); - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); long storeFlushableSize = store.getFlushableSize().getHeapSize(); if (flushDesc.getAction() == FlushAction.START_FLUSH) { @@ -501,7 +501,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); // Test case 2: replay a flush start marker with a smaller seqId @@ -514,7 +514,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); // Test case 3: replay a flush start marker with a larger seqId @@ -527,7 +527,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); LOG.info("-- Verifying edits from secondary"); @@ -596,7 +596,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, 
s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay the a flush commit marker smaller than what we have prepared LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" @@ -616,7 +616,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped // assert that the region memstore is same as before - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertEquals(regionMemstoreSize, newRegionMemstoreSize); assertNotNull(secondaryRegion.getPrepareFlushResult()); // not dropped @@ -686,7 +686,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay the a flush commit marker larger than what we have prepared LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" @@ -706,7 +706,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped // assert that the region memstore is smaller than before, but not empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize > 0); assertTrue(regionMemstoreSize > newRegionMemstoreSize); @@ -787,7 +787,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay a flush commit marker without start flush marker assertNull(secondaryRegion.getPrepareFlushResult()); @@ -816,7 +816,7 @@ public class TestHRegionReplayEvents { } // assert that the region memstore is same as before (we could not drop) - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); if (droppableMemstore) { assertTrue(0 == newRegionMemstoreSize); } else { @@ -886,7 +886,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(regionMemstoreSize == 0); // now replay the region open event that should contain new file locations @@ -903,7 +903,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize == MutableSegment.DEEP_OVERHEAD); // assert that the region memstore is empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize == 0); assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any @@ -982,7 +982,7 @@ public class TestHRegionReplayEvents { assertTrue(newSnapshotSize.getDataSize() == 0); // assert that the region memstore is empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long 
newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize == 0); assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any @@ -1432,7 +1432,7 @@ public class TestHRegionReplayEvents { LOG.info("-- Replaying edits in secondary"); // Test case 4: replay some edits, ensure that memstore is dropped. - assertTrue(secondaryRegion.getMemStoreSize() == 0); + assertTrue(secondaryRegion.getMemStoreDataSize() == 0); putDataWithFlushes(primaryRegion, 400, 400, 0); numRows = 400; @@ -1450,11 +1450,11 @@ public class TestHRegionReplayEvents { } } - assertTrue(secondaryRegion.getMemStoreSize() > 0); + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); secondaryRegion.refreshStoreFiles(); - assertTrue(secondaryRegion.getMemStoreSize() == 0); + assertTrue(secondaryRegion.getMemStoreDataSize() == 0); LOG.info("-- Verifying edits from primary"); verifyData(primaryRegion, 0, numRows, cq, families); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java index 6ec091a5c4..f01ce90a81 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java @@ -259,7 +259,7 @@ public class TestHStore { MemStoreSizing kvSize = new MemStoreSizing(); store.add(new KeyValue(row, family, qf1, 1, (byte[]) null), kvSize); // add the heap size of active (mutable) segment - kvSize.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD); + kvSize.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD, 0); size = store.memstore.getFlushableSize(); assertEquals(kvSize, size); // Flush. Bug #1 from HBASE-10466. Make sure size calculation on failed flush is right. @@ -272,12 +272,12 @@ public class TestHStore { } // due to snapshot, change mutable to immutable segment kvSize.incMemStoreSize(0, - CSLMImmutableSegment.DEEP_OVERHEAD_CSLM-MutableSegment.DEEP_OVERHEAD); + CSLMImmutableSegment.DEEP_OVERHEAD_CSLM-MutableSegment.DEEP_OVERHEAD, 0); size = store.memstore.getFlushableSize(); assertEquals(kvSize, size); MemStoreSizing kvSize2 = new MemStoreSizing(); store.add(new KeyValue(row, family, qf2, 2, (byte[])null), kvSize2); - kvSize2.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD); + kvSize2.incMemStoreSize(0, MutableSegment.DEEP_OVERHEAD, 0); // Even though we add a new kv, we expect the flushable size to be 'same' since we have // not yet cleared the snapshot -- the above flush failed. assertEquals(kvSize, size); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index b8155e4571..a7af7e0e31 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -142,7 +142,7 @@ public class TestPerColumnFamilyFlush { } } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. 
long smallestSeqCF1 = region.getOldestSeqIdOfStore(FAMILY1); @@ -185,7 +185,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); smallestSeqInRegionCurrentMemstore = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -223,7 +223,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); smallestSeqInRegionCurrentMemstore = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -259,7 +259,7 @@ public class TestPerColumnFamilyFlush { // Since we won't find any CF above the threshold, and hence no specific // store to flush, we should flush all the memstores. - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -283,7 +283,7 @@ public class TestPerColumnFamilyFlush { } } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. MemStoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); @@ -306,7 +306,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); long smallestSeqInRegionCurrentMemstore = region.getWAL().getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -375,7 +375,7 @@ public class TestPerColumnFamilyFlush { long totalMemstoreSize; long cf1MemstoreSize, cf2MemstoreSize, cf3MemstoreSize; - totalMemstoreSize = desiredRegion.getMemStoreSize(); + totalMemstoreSize = desiredRegion.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. cf1MemstoreSize = desiredRegion.getStore(FAMILY1).getMemStoreSize().getDataSize(); @@ -498,12 +498,12 @@ public class TestPerColumnFamilyFlush { @Override public boolean evaluate() throws Exception { - return desiredRegion.getMemStoreSize() == 0; + return desiredRegion.getMemStoreDataSize() == 0; } @Override public String explainFailure() throws Exception { - long memstoreSize = desiredRegion.getMemStoreSize(); + long memstoreSize = desiredRegion.getMemStoreDataSize(); if (memstoreSize > 0) { return "Still have unflushed entries in memstore, memstore size is " + memstoreSize; } @@ -545,7 +545,7 @@ public class TestPerColumnFamilyFlush { put.addColumn(FAMILY3, qf, value3); table.put(put); // slow down to let regionserver flush region. 
- while (region.getMemStoreSize() > memstoreFlushSize) { + while (region.getMemStoreDataSize() > memstoreFlushSize) { Thread.sleep(100); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java index 0122674ce1..a07deeb8bc 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java @@ -37,7 +37,7 @@ public class TestRegionServerAccounting { // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -50,7 +50,7 @@ public class TestRegionServerAccounting { // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); @@ -60,12 +60,12 @@ public class TestRegionServerAccounting { public void testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize() { Configuration conf = HBaseConfiguration.create(); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach offheap limit as data size is higher and not due to heap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L * 1024L), 0, (1L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_OFFHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -76,12 +76,12 @@ public class TestRegionServerAccounting { Configuration conf = HBaseConfiguration.create(); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach higher limit as heap size is higher and not due to offheap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l), (long) (2l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L), (2L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -91,12 +91,12 @@ public class TestRegionServerAccounting { public void 
testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize() { Configuration conf = HBaseConfiguration.create(); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach offheap limit as data size is higher and not due to heap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L * 1024L), 0, (1L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_OFFHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); @@ -107,12 +107,12 @@ public class TestRegionServerAccounting { Configuration conf = HBaseConfiguration.create(); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach higher limit as heap size is higher and not due to offheap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l), (long) (2l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L), (2L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index 2e5c5525a9..5f64ae5e7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -258,8 +258,8 @@ public class TestWALLockup { Thread t = new Thread ("Flusher") { public void run() { try { - if (region.getMemStoreSize() <= 0) { - throw new IOException("memstore size=" + region.getMemStoreSize()); + if (region.getMemStoreDataSize() <= 0) { + throw new IOException("memstore size=" + region.getMemStoreDataSize()); } region.flush(false); } catch (IOException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 6a647964db..acac984148 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -161,7 +161,7 @@ public class TestWalAndCompactingMemStoreFlush { region.put(createPut(2, i)); } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1PhaseI = region.getOldestSeqIdOfStore(FAMILY1); @@ -352,13 +352,13 @@ public class TestWalAndCompactingMemStoreFlush { s = s + "----AFTER THIRD AND FORTH FLUSH, The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseV + ". 
After additional inserts and last flush, the entire region size is:" + region - .getMemStoreSize() + .getMemStoreDataSize() + "\n----------------------------------\n"; // Since we won't find any CF above the threshold, and hence no specific // store to flush, we should flush all the memstores // Also compacted memstores are flushed to disk. - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); System.out.println(s); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -404,7 +404,7 @@ public class TestWalAndCompactingMemStoreFlush { /*------------------------------------------------------------------------------*/ /*------------------------------------------------------------------------------*/ /* PHASE I - collect sizes */ - long totalMemstoreSizePhaseI = region.getMemStoreSize(); + long totalMemstoreSizePhaseI = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1PhaseI = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseI = region.getOldestSeqIdOfStore(FAMILY2); @@ -467,7 +467,7 @@ public class TestWalAndCompactingMemStoreFlush { .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3); - long totalMemstoreSizePhaseII = region.getMemStoreSize(); + long totalMemstoreSizePhaseII = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE II - validation */ @@ -510,7 +510,7 @@ public class TestWalAndCompactingMemStoreFlush { /* PHASE III - collect sizes */ // How much does the CF1 memstore occupy now? Will be used later. MemStoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize(); - long totalMemstoreSizePhaseIII = region.getMemStoreSize(); + long totalMemstoreSizePhaseIII = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE III - validation */ @@ -568,7 +568,7 @@ public class TestWalAndCompactingMemStoreFlush { MemStoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize(); long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); - long totalMemstoreSizePhaseV = region.getMemStoreSize(); + long totalMemstoreSizePhaseV = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE V - validation */ @@ -660,7 +660,7 @@ public class TestWalAndCompactingMemStoreFlush { ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).setCompositeSnapshot(false); ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).setCompositeSnapshot(false); - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. MemStoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); @@ -795,7 +795,7 @@ public class TestWalAndCompactingMemStoreFlush { region.put(createPut(2, i)); } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. MemStoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); -- 2.13.6 (Apple Git-96)