From c747dc4c57a5eb1126854330fa9dbcd04d03921e Mon Sep 17 00:00:00 2001 From: eshcar Date: Wed, 20 Dec 2017 12:24:13 +0200 Subject: [PATCH] HBASE-18294 Reduce global heap pressure: flush based on heap occupancy --- .../hbase/regionserver/CompactingMemStore.java | 8 +- .../hbase/regionserver/CompactionPipeline.java | 17 ++- .../regionserver/CompositeImmutableSegment.java | 8 -- .../hadoop/hbase/regionserver/DefaultMemStore.java | 4 +- .../regionserver/FlushAllLargeStoresPolicy.java | 2 +- .../hbase/regionserver/FlushLargeStoresPolicy.java | 30 ++--- .../FlushNonSloppyStoresFirstPolicy.java | 2 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 144 +++++++++++---------- .../hadoop/hbase/regionserver/HRegionServer.java | 4 +- .../apache/hadoop/hbase/regionserver/HStore.java | 2 +- .../hadoop/hbase/regionserver/MemStoreFlusher.java | 17 +-- .../hadoop/hbase/regionserver/MemStoreLAB.java | 4 +- .../hadoop/hbase/regionserver/MemStoreSize.java | 8 +- .../hadoop/hbase/regionserver/MemStoreSizing.java | 41 +----- .../MetricsTableWrapperAggregateImpl.java | 2 +- .../apache/hadoop/hbase/regionserver/Region.java | 9 +- .../hbase/regionserver/RegionServerAccounting.java | 22 ++-- .../regionserver/RegionServicesForStores.java | 6 +- .../apache/hadoop/hbase/regionserver/Segment.java | 41 +++--- .../hadoop/hbase/TestGlobalMemStoreSize.java | 4 +- .../hbase/client/TestAsyncRegionAdminApi.java | 12 +- .../hadoop/hbase/client/TestClientPushback.java | 3 +- ...estNegativeMemStoreSizeWithSlowCoprocessor.java | 3 +- .../hbase/regionserver/TestCompactingMemStore.java | 4 +- .../TestCompactingToCellFlatMapMemStore.java | 2 +- .../regionserver/TestEndToEndSplitTransaction.java | 2 +- .../hadoop/hbase/regionserver/TestHRegion.java | 20 +-- .../regionserver/TestHRegionReplayEvents.java | 36 +++--- .../regionserver/TestPerColumnFamilyFlush.java | 21 ++- .../regionserver/TestRegionServerAccounting.java | 20 +-- .../hadoop/hbase/regionserver/TestWALLockup.java | 4 +- .../TestWalAndCompactingMemStoreFlush.java | 18 +-- 32 files changed, 247 insertions(+), 273 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index 7b885ff1af..d4eaf6e09a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -149,9 +149,9 @@ public class CompactingMemStore extends AbstractMemStore { @Override public MemStoreSize size() { MemStoreSizing memstoreSizing = new MemStoreSizing(); - memstoreSizing.incMemStoreSize(this.active.keySize(), this.active.heapSize()); + memstoreSizing.incMemStoreSize(active.getMemStoreSize()); for (Segment item : pipeline.getSegments()) { - memstoreSizing.incMemStoreSize(item.keySize(), item.heapSize()); + memstoreSizing.incMemStoreSize(item.getMemStoreSize()); } return memstoreSizing; } @@ -223,13 +223,13 @@ public class CompactingMemStore extends AbstractMemStore { // if snapshot is empty the tail of the pipeline (or everything in the memstore) is flushed if (compositeSnapshot) { snapshotSizing = pipeline.getPipelineSizing(); - snapshotSizing.incMemStoreSize(this.active.keySize(), this.active.heapSize()); + snapshotSizing.incMemStoreSize(active.getMemStoreSize()); } else { snapshotSizing = pipeline.getTailSizing(); } } return snapshotSizing.getDataSize() > 0 ? 
snapshotSizing - : new MemStoreSize(this.active.keySize(), this.active.heapSize()); + : new MemStoreSize(active.getMemStoreSize()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java index 49abe72fa3..a1d9ed5603 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java @@ -143,9 +143,11 @@ public class CompactionPipeline { long dataSizeDelta = suffixDataSize - newDataSize; long suffixHeapSize = getSegmentsHeapSize(suffix); long newHeapSize = 0; - if(segment != null) newHeapSize = segment.heapSize(); + if(segment != null) { + newHeapSize = segment.heapSize(); + } long heapSizeDelta = suffixHeapSize - newHeapSize; - region.addMemStoreSize(new MemStoreSizing(-dataSizeDelta, -heapSizeDelta)); + region.addMemStoreSize(new MemStoreSize(-dataSizeDelta, -heapSizeDelta)); if (LOG.isDebugEnabled()) { LOG.debug("Suffix data size: " + suffixDataSize + " new segment data size: " + newDataSize + ". Suffix heap size: " + suffixHeapSize @@ -242,19 +244,16 @@ public class CompactionPipeline { public MemStoreSizing getTailSizing() { LinkedList localCopy = readOnlyCopy; if (localCopy.isEmpty()) return new MemStoreSizing(); - return new MemStoreSizing(localCopy.peekLast().keySize(), localCopy.peekLast().heapSize()); + return new MemStoreSizing(localCopy.peekLast().getMemStoreSize()); } public MemStoreSizing getPipelineSizing() { - long keySize = 0; - long heapSize = 0; + MemStoreSizing memStoreSizing = new MemStoreSizing(); LinkedList localCopy = readOnlyCopy; - if (localCopy.isEmpty()) return new MemStoreSizing(); for (Segment segment : localCopy) { - keySize += segment.keySize(); - heapSize += segment.heapSize(); + memStoreSizing.incMemStoreSize(segment.getMemStoreSize()); } - return new MemStoreSizing(keySize, heapSize); + return memStoreSizing; } private void swapSuffix(List suffix, ImmutableSegment segment, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java index bf9ff13cba..dbc0012b94 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java @@ -86,14 +86,6 @@ public class CompositeImmutableSegment extends ImmutableSegment { return result; } - /** - * @return the first cell in the segment that has equal or greater key than the given cell - */ - @Override - public Cell getFirstAfter(Cell cell) { - throw new IllegalStateException("Not supported by CompositeImmutableScanner"); - } - /** * Closing a segment before it is being discarded */ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index 061e4d0733..9ef6a6cc2b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -106,7 +106,7 @@ public class DefaultMemStore extends AbstractMemStore { public MemStoreSize getFlushableSize() { MemStoreSize snapshotSize = getSnapshotSize(); return 
snapshotSize.getDataSize() > 0 ? snapshotSize - : new MemStoreSize(keySize(), heapSize()); + : new MemStoreSize(active.getMemStoreSize()); } @Override @@ -155,7 +155,7 @@ public class DefaultMemStore extends AbstractMemStore { @Override public MemStoreSize size() { - return new MemStoreSize(this.active.keySize(), this.active.heapSize()); + return new MemStoreSize(active.getMemStoreSize()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java index 0f0117899f..902cc8ff51 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java @@ -44,7 +44,7 @@ public class FlushAllLargeStoresPolicy extends FlushLargeStoresPolicy { // Family number might also be zero in some of our unit test case return; } - this.flushSizeLowerBound = getFlushSizeLowerBound(region); + setFlushSizeLowerBound(region); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java index 74bde60397..12a0c45df9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushLargeStoresPolicy.java @@ -40,18 +40,18 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy { public static final long DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN = 1024 * 1024 * 16L; - protected long flushSizeLowerBound = -1; + protected long flushHeapSizeLowerBound = -1; - protected long getFlushSizeLowerBound(HRegion region) { + protected void setFlushSizeLowerBound(HRegion region) { int familyNumber = region.getTableDescriptor().getColumnFamilyCount(); // For multiple families, lower bound is the "average flush size" by default // unless setting in configuration is larger. - long flushSizeLowerBound = region.getMemStoreFlushSize() / familyNumber; + flushHeapSizeLowerBound = region.getMemStoreFlushHeapSize() / familyNumber; long minimumLowerBound = getConf().getLong(HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, DEFAULT_HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN); - if (minimumLowerBound > flushSizeLowerBound) { - flushSizeLowerBound = minimumLowerBound; + if (minimumLowerBound > flushHeapSizeLowerBound) { + flushHeapSizeLowerBound = minimumLowerBound; } // use the setting in table description if any String flushedSizeLowerBoundString = @@ -60,30 +60,30 @@ public abstract class FlushLargeStoresPolicy extends FlushPolicy { if (LOG.isDebugEnabled()) { LOG.debug("No " + HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND + " set in description of table " + region.getTableDescriptor().getTableName() - + ", use config (" + flushSizeLowerBound + ") instead"); + + ", use config (flushHeapSizeLowerBound=" + flushHeapSizeLowerBound+ ") instead"); } } else { try { - flushSizeLowerBound = Long.parseLong(flushedSizeLowerBoundString); + flushHeapSizeLowerBound = Long.parseLong(flushedSizeLowerBoundString); } catch (NumberFormatException nfe) { // fall back for fault setting LOG.warn("Number format exception when parsing " + HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND + " for table " - + region.getTableDescriptor().getTableName() + ":" + flushedSizeLowerBoundString + ". 
" + nfe - + ", use config (" + flushSizeLowerBound + ") instead"); + + region.getTableDescriptor().getTableName() + ":" + flushedSizeLowerBoundString + + ". " + nfe + + ", use config (flushHeapSizeLowerBound=" + flushHeapSizeLowerBound+") instead"); } } - return flushSizeLowerBound; } protected boolean shouldFlush(HStore store) { - if (store.getMemStoreSize().getDataSize() > this.flushSizeLowerBound) { + if (store.getMemStoreSize().getHeapSize() > this.flushHeapSizeLowerBound) { if (LOG.isDebugEnabled()) { - LOG.debug("Flush Column Family " + store.getColumnFamilyName() + " of " + - region.getRegionInfo().getEncodedName() + " because memstoreSize=" + - store.getMemStoreSize().getDataSize() + " > lower bound=" - + this.flushSizeLowerBound); + LOG.debug("Flush Column Family " + store.getColumnFamilyName() + " of " + + region.getRegionInfo().getEncodedName() + " because memstore size=" + + store.getMemStoreSize().toString() + + " (flush heap lower bound=" + this.flushHeapSizeLowerBound + ")"); } return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java index ed23e3d9d8..e64b5225ed 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java @@ -63,7 +63,7 @@ public class FlushNonSloppyStoresFirstPolicy extends FlushLargeStoresPolicy { @Override protected void configureForRegion(HRegion region) { super.configureForRegion(region); - this.flushSizeLowerBound = getFlushSizeLowerBound(region); + setFlushSizeLowerBound(region); for (HStore store : region.stores.values()) { if (store.isSloppyMemStore()) { sloppyStores.add(store); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 6401a8b957..4b87f24f64 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -63,7 +63,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; @@ -285,7 +284,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // TODO: account for each registered handler in HeapSize computation private Map coprocessorServiceHandlers = Maps.newHashMap(); - private final AtomicLong memstoreDataSize = new AtomicLong(0);// Track data size in all memstores + // Track data size in all memstores + private final MemStoreSizing memStoreSize = new MemStoreSizing(); private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this); // Debug possible data loss due to WAL off @@ -632,7 +632,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi final WriteState writestate = new WriteState(); - long memstoreFlushSize; + long memStoreFlushHeapSize; final long timestampSlop; final long rowProcessorTimeout; @@ -644,7 +644,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi private long flushCheckInterval; // flushPerChanges is to prevent too many changes in memstore private long flushPerChanges; - private long blockingMemStoreSize; + private long blockingMemStoreHeapSize; // Used to guard closes final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); @@ -829,10 +829,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi flushSize = conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE); } - this.memstoreFlushSize = flushSize; - this.blockingMemStoreSize = this.memstoreFlushSize * - conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, - HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); + this.memStoreFlushHeapSize = flushSize; + long mult = conf.getLong(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, + HConstants.DEFAULT_HREGION_MEMSTORE_BLOCK_MULTIPLIER); + this.blockingMemStoreHeapSize = this.memStoreFlushHeapSize * mult; } /** @@ -1194,32 +1194,38 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi /** * Increase the size of mem store in this region and the size of global mem * store - * @return the size of memstore in this region */ - public long addAndGetMemStoreSize(MemStoreSize memstoreSize) { + public void incMemStoreSize(MemStoreSize memStoreSize) { if (this.rsAccounting != null) { - rsAccounting.incGlobalMemStoreSize(memstoreSize); + rsAccounting.incGlobalMemStoreSize(memStoreSize); } - long size = this.memstoreDataSize.addAndGet(memstoreSize.getDataSize()); - checkNegativeMemStoreDataSize(size, memstoreSize.getDataSize()); - return size; + long dataSize; + synchronized (this.memStoreSize) { + this.memStoreSize.incMemStoreSize(memStoreSize); + dataSize = this.memStoreSize.getDataSize(); + } + checkNegativeMemStoreDataSize(dataSize, memStoreSize.getDataSize()); } - public void decrMemStoreSize(MemStoreSize memstoreSize) { + public void decrMemStoreSize(MemStoreSize memStoreSize) { if (this.rsAccounting != null) { - rsAccounting.decGlobalMemStoreSize(memstoreSize); + rsAccounting.decGlobalMemStoreSize(memStoreSize); + } + long size; + synchronized (this.memStoreSize) { + this.memStoreSize.decMemStoreSize(memStoreSize); + size = this.memStoreSize.getDataSize(); } - long size = this.memstoreDataSize.addAndGet(-memstoreSize.getDataSize()); - checkNegativeMemStoreDataSize(size, -memstoreSize.getDataSize()); + checkNegativeMemStoreDataSize(size, -memStoreSize.getDataSize()); } - private void checkNegativeMemStoreDataSize(long memstoreDataSize, long delta) { - // This is extremely bad if we make memstoreSize negative. Log as much info on the offending + private void checkNegativeMemStoreDataSize(long memStoreDataSize, long delta) { + // This is extremely bad if we make memStoreSize negative. Log as much info on the offending // caller as possible. (memStoreSize might be a negative value already -- freeing memory) - if (memstoreDataSize < 0) { + if (memStoreDataSize < 0) { LOG.error("Asked to modify this region's (" + this.toString() - + ") memstoreSize to a negative value which is incorrect. Current memstoreSize=" - + (memstoreDataSize - delta) + ", delta=" + delta, new Exception()); + + ") memStoreSize to a negative value which is incorrect. 
Current memStoreSize=" + + (memStoreDataSize - delta) + ", delta=" + delta, new Exception()); } } @@ -1252,8 +1258,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } @Override - public long getMemStoreSize() { - return memstoreDataSize.get(); + public long getMemStoreDataSize() { + return memStoreSize.getDataSize(); + } + + @Override + public long getMemStoreHeapSize() { + return memStoreSize.getHeapSize(); } /** @return store services for this region, to access services required by store level needs */ @@ -1523,7 +1534,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi int failedfFlushCount = 0; int flushCount = 0; long tmp = 0; - long remainingSize = this.memstoreDataSize.get(); + long remainingSize = this.memStoreSize.getDataSize(); while (remainingSize > 0) { try { internalFlushcache(status); @@ -1532,7 +1543,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi " (carrying snapshot?) " + this); } flushCount++; - tmp = this.memstoreDataSize.get(); + tmp = this.memStoreSize.getDataSize(); if (tmp >= remainingSize) { failedfFlushCount++; } @@ -1572,7 +1583,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi getRegionServerServices().abort("Assertion failed while closing store " + getRegionInfo().getRegionNameAsString() + " " + store + ". flushableSize expected=0, actual= " + flushableSize - + ". Current memstoreSize=" + getMemStoreSize() + ". Maybe a coprocessor " + + ". Current memStoreSize=" + getMemStoreDataSize() + ". Maybe a coprocessor " + "operation failed and left the memstore in a partially updated state.", null); } } @@ -1615,9 +1626,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.closed.set(true); if (!canFlush) { - this.decrMemStoreSize(new MemStoreSizing(memstoreDataSize.get(), getMemStoreHeapSize())); - } else if (memstoreDataSize.get() != 0) { - LOG.error("Memstore size is " + memstoreDataSize.get()); + this.decrMemStoreSize(new MemStoreSize(memStoreSize)); + } else if (memStoreSize.getDataSize() != 0) { + LOG.error("Memstore data size is " + memStoreSize.getDataSize()); } if (coprocessorHost != null) { status.setStatus("Running coprocessor post-close hooks"); @@ -1637,10 +1648,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } } - private long getMemStoreHeapSize() { - return stores.values().stream().mapToLong(s -> s.getMemStoreSize().getHeapSize()).sum(); - } - /** Wait for all current flushes and compactions of the region to complete */ // TODO HBASE-18906. Check the usage (if any) in Phoenix and expose this or give alternate way for // Phoenix needs. @@ -1754,7 +1761,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * @return True if its worth doing a flush before we put up the close flag. */ private boolean worthPreFlushing() { - return this.memstoreDataSize.get() > + return this.memStoreSize.getDataSize() > this.conf.getLong("hbase.hregion.preclose.flush.size", 1024 * 1024 * 5); } @@ -2372,12 +2379,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // bulk loaded file between memory and existing hfiles. It wants a good seqeunceId that belongs // to no other that it can use to associate with the bulk load. Hence this little dance below // to go get one. 
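(Aside, not part of the patch: the HRegion hunks above replace the single AtomicLong data-size counter with a MemStoreSizing that tracks data size and heap size together, updated under synchronization and guarded by checkNegativeMemStoreDataSize. A minimal, hypothetical sketch of that accounting pattern follows; the class and method names are invented for illustration and are not the HBase API, and the real code logs an error on a negative total rather than throwing.)

public final class SizeAccountingSketch {

  private long dataSize; // bytes of key/value/tag data held in the memstore
  private long heapSize; // data size plus per-Cell Java object overhead

  /** Apply a delta; callers pass negative deltas when a flush releases memory. */
  public synchronized void add(long dataDelta, long heapDelta) {
    dataSize += dataDelta;
    heapSize += heapDelta;
    if (dataSize < 0 || heapSize < 0) {
      // The patch logs an error with a stack trace at this point; throwing keeps the sketch short.
      throw new IllegalStateException(
          "negative memstore size: data=" + dataSize + ", heap=" + heapSize);
    }
  }

  public synchronized long getDataSize() { return dataSize; }

  public synchronized long getHeapSize() { return heapSize; }
}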
- if (this.memstoreDataSize.get() <= 0) { + if (this.memStoreSize.getDataSize() <= 0) { // Take an update lock so no edits can come into memory just yet. this.updatesLock.writeLock().lock(); WriteEntry writeEntry = null; try { - if (this.memstoreDataSize.get() <= 0) { + if (this.memStoreSize.getDataSize() <= 0) { // Presume that if there are still no edits in the memstore, then there are no edits for // this region out in the WAL subsystem so no need to do any trickery clearing out // edits in the WAL sub-system. Up the sequence number so the resulting flush id is for @@ -2513,8 +2520,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi .append(StringUtils.byteDesc(store.getFlushableSize().getDataSize())); } } - LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() + - " column families, memstore=" + StringUtils.byteDesc(this.memstoreDataSize.get()) + + LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() + " column families," + + " memstore data size=" + StringUtils.byteDesc(this.memStoreSize.getDataSize()) + + " memstore heap size=" + StringUtils.byteDesc(this.memStoreSize.getHeapSize()) + ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") + ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId)); } @@ -2701,11 +2709,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } long time = EnvironmentEdgeManager.currentTime() - startTime; - long memstoresize = this.memstoreDataSize.get(); - String msg = "Finished memstore flush of ~" - + StringUtils.byteDesc(prepareResult.totalFlushableSize.getDataSize()) + "/" - + prepareResult.totalFlushableSize.getDataSize() + ", currentsize=" - + StringUtils.byteDesc(memstoresize) + "/" + memstoresize + long flushableDataSize = prepareResult.totalFlushableSize.getDataSize(); + long flushableHeapSize = prepareResult.totalFlushableSize.getHeapSize(); + long memstoresize = this.memStoreSize.getDataSize(); + String msg = "Finished memstore flush." + + " Flushed data size ~" + StringUtils.byteDesc(flushableDataSize) + "/" + flushableDataSize + + " Flushed Heap size ~" + StringUtils.byteDesc(flushableHeapSize) + "/" + flushableHeapSize + + ", currentsize=" + StringUtils.byteDesc(memstoresize) + "/" + memstoresize + " for region " + this + " in " + time + "ms, sequenceid=" + flushOpSeqId + ", compaction requested=" + compactionRequested + ((wal == null) ? "; wal=null" : ""); @@ -3039,7 +3049,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return true; }); // update memStore size - region.addAndGetMemStoreSize(memStoreAccounting); + region.incMemStoreSize(memStoreAccounting); } public boolean isDone() { @@ -3806,8 +3816,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi initialized = true; } doMiniBatchMutate(batchOp); - long newSize = this.getMemStoreSize(); - requestFlushIfNeeded(newSize); + requestFlushIfNeeded(); } } finally { batchOp.closeRegionOperation(); @@ -4167,7 +4176,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // If catalog region, do not impose resource constraints or block updates. 
if (this.getRegionInfo().isMetaRegion()) return; - if (this.memstoreDataSize.get() > this.blockingMemStoreSize) { + if (this.memStoreSize.getHeapSize() > this.blockingMemStoreHeapSize) { blockedRequestsCount.increment(); requestFlush(); throw new RegionTooBusyException("Above memstore limit, " + @@ -4175,8 +4184,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi this.getRegionInfo().getRegionNameAsString()) + ", server=" + (this.getRegionServerServices() == null ? "unknown" : this.getRegionServerServices().getServerName()) + - ", memstoreSize=" + memstoreDataSize.get() + - ", blockingMemStoreSize=" + blockingMemStoreSize); + ", memStoreSize=" + memStoreSize + + ", blockingMemStoreHeapSize=" + blockingMemStoreHeapSize + ); } } @@ -4294,8 +4304,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * @param size * @return True if size is over the flush threshold */ - private boolean isFlushSize(final long size) { - return size > this.memstoreFlushSize; + private boolean isFlushSize(MemStoreSize size) { + return size.getHeapSize() > getMemStoreFlushHeapSize(); } /** @@ -4586,7 +4596,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi rsAccounting.addRegionReplayEditsSize(getRegionInfo().getRegionName(), memstoreSize); } - flush = isFlushSize(this.addAndGetMemStoreSize(memstoreSize)); + incMemStoreSize(memstoreSize); + flush = isFlushSize(this.memStoreSize); if (flush) { internalFlushcache(null, currentEditSeqId, stores.values(), status, false, FlushLifeCycleTracker.DUMMY); @@ -7220,8 +7231,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return null; } ClientProtos.RegionLoadStats.Builder stats = ClientProtos.RegionLoadStats.newBuilder(); - stats.setMemStoreLoad((int) (Math.min(100, (this.memstoreDataSize.get() * 100) / this - .memstoreFlushSize))); + stats.setMemStoreLoad((int) (Math.min(100, (this.memStoreSize.getHeapSize() * 100) / this + .memStoreFlushHeapSize))); if (rsServices.getHeapMemoryManager() != null) { // the HeapMemoryManager uses -0.0 to signal a problem asking the JVM, // so we could just do the calculation below and we'll get a 0. @@ -7368,8 +7379,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } finally { closeRegionOperation(); if (!mutations.isEmpty()) { - long newSize = this.addAndGetMemStoreSize(memstoreAccounting); - requestFlushIfNeeded(newSize); + this.incMemStoreSize(memstoreAccounting); + requestFlushIfNeeded(); } } } @@ -7522,7 +7533,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi rowLock.release(); } // Request a cache flush if over the limit. Do it outside update lock. 
- if (isFlushSize(addAndGetMemStoreSize(memstoreAccounting))) { + incMemStoreSize(memstoreAccounting); + if (isFlushSize(memStoreSize)) { requestFlush(); } closeRegionOperation(op); @@ -7833,7 +7845,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public static final long FIXED_OVERHEAD = ClassSize.align( ClassSize.OBJECT + ClassSize.ARRAY + - 50 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT + + 51 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT + (14 * Bytes.SIZEOF_LONG) + 3 * Bytes.SIZEOF_BOOLEAN); @@ -7850,7 +7862,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public static final long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.OBJECT + // closeLock (2 * ClassSize.ATOMIC_BOOLEAN) + // closed, closing - (4 * ClassSize.ATOMIC_LONG) + // memStoreSize, numPutsWithoutWAL, dataInMemoryWithoutWAL, + (3 * ClassSize.ATOMIC_LONG) + // numPutsWithoutWAL, dataInMemoryWithoutWAL, // compactionsFailed (2 * ClassSize.CONCURRENT_HASHMAP) + // lockedRows, scannerReadPoints WriteState.HEAP_SIZE + // writestate @@ -8352,8 +8364,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi : CellComparatorImpl.COMPARATOR; } - public long getMemStoreFlushSize() { - return this.memstoreFlushSize; + public long getMemStoreFlushHeapSize() { + return this.memStoreFlushHeapSize; } //// method for debugging tests @@ -8371,7 +8383,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } buf.append("end-of-stores"); buf.append(", memstore size "); - buf.append(getMemStoreSize()); + buf.append(getMemStoreDataSize()); if (getRegionInfo().getRegionNameAsString().startsWith(regionName)) { throw new RuntimeException(buf.toString()); } @@ -8402,8 +8414,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi RpcServer.getRequestUser().orElse(null)); } - private void requestFlushIfNeeded(long memstoreTotalSize) throws RegionTooBusyException { - if (memstoreTotalSize > this.getMemStoreFlushSize()) { + private void requestFlushIfNeeded() throws RegionTooBusyException { + if (getMemStoreHeapSize() > this.getMemStoreFlushHeapSize()) { requestFlush(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 8ead08c7c4..e7d76cadba 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -1612,7 +1612,7 @@ public class HRegionServer extends HasThread implements int storefiles = 0; int storeUncompressedSizeMB = 0; int storefileSizeMB = 0; - int memstoreSizeMB = (int) (r.getMemStoreSize() / 1024 / 1024); + int memstoreSizeMB = (int) (r.getMemStoreDataSize() / 1024 / 1024); long storefileIndexSizeKB = 0; int rootLevelIndexSizeKB = 0; int totalStaticIndexSizeKB = 0; @@ -2797,7 +2797,7 @@ public class HRegionServer extends HasThread implements }); // Copy over all regions. Regions are sorted by size with biggest first. 
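(Aside, not part of the patch: the hunks above move both the blocking check in checkResources and the flush trigger in isFlushSize/requestFlushIfNeeded from data size to heap size. A simplified, hypothetical illustration of that decision logic follows; the names are invented for the example, and the real thresholds come from hbase.hregion.memstore.flush.size and the block multiplier shown earlier in the patch.)

final class HeapFlushPolicySketch {

  private final long flushHeapSize;    // per-region flush threshold, in heap bytes
  private final long blockingHeapSize; // flushHeapSize * block multiplier

  HeapFlushPolicySketch(long flushHeapSize, long blockMultiplier) {
    this.flushHeapSize = flushHeapSize;
    this.blockingHeapSize = flushHeapSize * blockMultiplier;
  }

  /** True when an asynchronous flush should be requested for the region. */
  boolean shouldFlush(long memStoreHeapSize) {
    return memStoreHeapSize > flushHeapSize;
  }

  /** True when writes should be rejected (RegionTooBusyException) until a flush catches up. */
  boolean shouldBlockUpdates(long memStoreHeapSize) {
    return memStoreHeapSize > blockingHeapSize;
  }
}

Comparing against heap occupancy rather than raw data size is the point of the change: with small cells the per-Cell object overhead dominates, so data size alone understates the real pressure on the JVM heap.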
for (HRegion region : this.onlineRegions.values()) { - sortedRegions.put(region.getMemStoreSize(), region); + sortedRegions.put(region.getMemStoreDataSize(), region); } return sortedRegions; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 95ca9dc302..2e92e687cf 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -399,7 +399,7 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat @Override public long getMemStoreFlushSize() { // TODO: Why is this in here? The flushsize of the region rather than the store? St.Ack - return this.region.memstoreFlushSize; + return this.region.memStoreFlushHeapSize; } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index 9e352ef428..fa5cdaf846 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -148,7 +148,7 @@ class MemStoreFlusher implements FlushRequester { HRegion regionToFlush; if (bestFlushableRegion != null && - bestAnyRegion.getMemStoreSize() > 2 * bestFlushableRegion.getMemStoreSize()) { + bestAnyRegion.getMemStoreDataSize() > 2 * bestFlushableRegion.getMemStoreDataSize()) { // Even if it's not supposed to be flushed, pick a region if it's more than twice // as big as the best flushable one - otherwise when we're under pressure we make // lots of little flushes and cause lots of compactions, etc, which just makes @@ -157,9 +157,10 @@ class MemStoreFlusher implements FlushRequester { LOG.debug("Under global heap pressure: " + "Region " + bestAnyRegion.getRegionInfo().getRegionNameAsString() + " has too many " + "store files, but is " - + TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemStoreSize(), "", 1) + + TraditionalBinaryPrefix.long2String(bestAnyRegion.getMemStoreDataSize(), "", 1) + " vs best flushable region's " - + TraditionalBinaryPrefix.long2String(bestFlushableRegion.getMemStoreSize(), "", 1) + + TraditionalBinaryPrefix.long2String( + bestFlushableRegion.getMemStoreDataSize(), "", 1) + ". Choosing the bigger."); } regionToFlush = bestAnyRegion; @@ -172,14 +173,14 @@ class MemStoreFlusher implements FlushRequester { } Preconditions.checkState( - (regionToFlush != null && regionToFlush.getMemStoreSize() > 0) || - (bestRegionReplica != null && bestRegionReplica.getMemStoreSize() > 0)); + (regionToFlush != null && regionToFlush.getMemStoreDataSize() > 0) || + (bestRegionReplica != null && bestRegionReplica.getMemStoreDataSize() > 0)); if (regionToFlush == null || (bestRegionReplica != null && ServerRegionReplicaUtil.isRegionReplicaStoreFileRefreshEnabled(conf) && - (bestRegionReplica.getMemStoreSize() - > secondaryMultiplier * regionToFlush.getMemStoreSize()))) { + (bestRegionReplica.getMemStoreDataSize() + > secondaryMultiplier * regionToFlush.getMemStoreDataSize()))) { LOG.info("Refreshing storefiles of region " + bestRegionReplica + " due to global heap pressure. 
Total memstore datasize=" + TraditionalBinaryPrefix.long2String( @@ -198,7 +199,7 @@ class MemStoreFlusher implements FlushRequester { TraditionalBinaryPrefix.long2String( server.getRegionServerAccounting().getGlobalMemStoreDataSize(), "", 1) + ", Region memstore size=" + - TraditionalBinaryPrefix.long2String(regionToFlush.getMemStoreSize(), "", 1)); + TraditionalBinaryPrefix.long2String(regionToFlush.getMemStoreDataSize(), "", 1)); flushedOne = flushRegion(regionToFlush, true, false, FlushLifeCycleTracker.DUMMY); if (!flushedOne) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java index 1982b4fb4e..165a2686fe 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreLAB.java @@ -103,7 +103,7 @@ public interface MemStoreLAB { */ Chunk getNewExternalJumboChunk(int size); - public static MemStoreLAB newInstance(Configuration conf) { + static MemStoreLAB newInstance(Configuration conf) { MemStoreLAB memStoreLAB = null; if (isEnabled(conf)) { String className = conf.get(MSLAB_CLASS_NAME, MemStoreLABImpl.class.getName()); @@ -113,7 +113,7 @@ public interface MemStoreLAB { return memStoreLAB; } - public static boolean isEnabled(Configuration conf) { + static boolean isEnabled(Configuration conf) { return conf.getBoolean(USEMSLAB_KEY, USEMSLAB_DEFAULT); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSize.java index 557a61a49c..c187834e84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSize.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSize.java @@ -32,12 +32,12 @@ public class MemStoreSize { * be in on heap or off heap area depending on the MSLAB and its configuration to be using on heap * or off heap LABs */ - protected long dataSize; + protected volatile long dataSize; /** 'heapSize' tracks all Cell's heap size occupancy. This will include Cell POJO heap overhead. * When Cells in on heap area, this will include the cells data size as well. 
*/ - protected long heapSize; + protected volatile long heapSize; public MemStoreSize() { this(0L, 0L); @@ -48,6 +48,10 @@ public class MemStoreSize { this.heapSize = heapSize; } + protected MemStoreSize(MemStoreSize memStoreSize) { + this.dataSize = memStoreSize.dataSize; + this.heapSize = memStoreSize.heapSize; + } public boolean isEmpty() { return this.dataSize == 0 && this.heapSize == 0; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java index b13201d4a2..0a84d64b4e 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreSizing.java @@ -28,23 +28,12 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MemStoreSizing extends MemStoreSize { public static final MemStoreSizing DUD = new MemStoreSizing() { - @Override - public void incMemStoreSize(MemStoreSize delta) { - incMemStoreSize(delta.getDataSize(), delta.getHeapSize()); - } - @Override - public void incMemStoreSize(long dataSizeDelta, long heapSizeDelta) { + @Override public void incMemStoreSize(long dataSizeDelta, long heapSizeDelta) { throw new RuntimeException("I'm a dud, you can't use me!"); } - @Override - public void decMemStoreSize(MemStoreSize delta) { - decMemStoreSize(delta.getDataSize(), delta.getHeapSize()); - } - - @Override - public void decMemStoreSize(long dataSizeDelta, long heapSizeDelta) { + @Override public void decMemStoreSize(long dataSizeDelta, long heapSizeDelta) { throw new RuntimeException("I'm a dud, you can't use me!"); } }; @@ -57,6 +46,10 @@ public class MemStoreSizing extends MemStoreSize { super(dataSize, heapSize); } + public MemStoreSizing(MemStoreSize memStoreSize) { + super(memStoreSize); + } + public void incMemStoreSize(long dataSizeDelta, long heapSizeDelta) { this.dataSize += dataSizeDelta; this.heapSize += heapSizeDelta; @@ -80,24 +73,4 @@ public class MemStoreSizing extends MemStoreSize { this.heapSize = 0L; } - @Override - public boolean equals(Object obj) { - if (obj == null || (getClass() != obj.getClass())) { - return false; - } - MemStoreSizing other = (MemStoreSizing) obj; - return this.dataSize == other.dataSize && this.heapSize == other.heapSize; - } - - @Override - public int hashCode() { - long h = 13 * this.dataSize; - h = h + 14 * this.heapSize; - return (int) h; - } - - @Override - public String toString() { - return "dataSize=" + this.dataSize + " , heapSize=" + this.heapSize; - } -} +} \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java index 10656fefa8..53e5fa87b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregateImpl.java @@ -72,7 +72,7 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr for (Store store : r.getStores()) { tempStorefilesSize += store.getStorefilesSize(); } - metricsTable.setMemStoresSize(metricsTable.getMemStoresSize() + r.getMemStoreSize()); + metricsTable.setMemStoresSize(metricsTable.getMemStoresSize() + r.getMemStoreDataSize()); 
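(Aside, not part of the patch: MemStoreSize above becomes a read view with volatile dataSize/heapSize fields plus a copy constructor, while MemStoreSizing keeps the mutators. A stripped-down, hypothetical sketch of that split follows; the names are simplified for illustration.)

class SizeSnapshotSketch {
  protected volatile long dataSize;
  protected volatile long heapSize;

  SizeSnapshotSketch(long dataSize, long heapSize) {
    this.dataSize = dataSize;
    this.heapSize = heapSize;
  }

  SizeSnapshotSketch(SizeSnapshotSketch other) { // copy constructor, as added to MemStoreSize
    this(other.dataSize, other.heapSize);
  }

  long getDataSize() { return dataSize; }
  long getHeapSize() { return heapSize; }
}

class SizeAccumulatorSketch extends SizeSnapshotSketch {
  SizeAccumulatorSketch() { super(0L, 0L); }
  SizeAccumulatorSketch(SizeSnapshotSketch other) { super(other); }

  void inc(long dataDelta, long heapDelta) {
    this.dataSize += dataDelta; // += on a volatile is not atomic; the patch relies on external
    this.heapSize += heapDelta; // guarding, e.g. the synchronized blocks in Segment and HRegion
  }
}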
metricsTable.setStoreFilesSize(metricsTable.getStoreFilesSize() + tempStorefilesSize); metricsTable.setTableSize(metricsTable.getMemStoresSize() + metricsTable.getStoreFilesSize()); metricsTable.setReadRequestsCount(metricsTable.getReadRequestsCount() + r.getReadRequestsCount()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java index 75f02a363c..01aa6caa86 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java @@ -150,7 +150,14 @@ public interface Region extends ConfigurationObserver { * the memstores of this Region. Means size in bytes for key, value and tags within Cells. * It wont consider any java heap overhead for the cell objects or any other. */ - long getMemStoreSize(); + long getMemStoreDataSize(); + + /** + * @return memstore heap size for this region, in bytes. It accounts data size of cells + * added to the memstores of this Region, as well as java heap overhead for the cell + * objects or any other. + */ + long getMemStoreHeapSize(); /** @return the number of mutations processed bypassing the WAL */ long getNumMutationsWithoutWAL(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java index 7689fcd61b..ce4ed47ae2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerAccounting.java @@ -31,17 +31,17 @@ import org.apache.hadoop.hbase.util.Pair; /** * RegionServerAccounting keeps record of some basic real time information about - * the Region Server. Currently, it keeps record the global memstore size and global memstore heap - * overhead. It also tracks the replay edits per region. + * the Region Server. Currently, it keeps record the global memstore size and global memstore + * on-heap and off-heap overhead. It also tracks the replay edits per region. */ @InterfaceAudience.Private public class RegionServerAccounting { // memstore data size - private final LongAdder globalMemstoreDataSize = new LongAdder(); + private final LongAdder globalMemStoreDataSize = new LongAdder(); // memstore heap size. When off heap MSLAB in place, this will be only heap overhead of the Cell // POJOs and entry overhead of them onto memstore. When on heap MSLAB, this will be include heap // overhead as well as the cell data size. Ya cell data is in on heap area only then. - private final LongAdder globalMemstoreHeapSize = new LongAdder(); + private final LongAdder globalMemStoreHeapSize = new LongAdder(); // Store the edits size during replaying WAL. Use this to roll back the // global memstore size once a region opening failed. 
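(Aside, not part of the patch: RegionServerAccounting above keeps the global data-size and heap-size totals in two LongAdders that are incremented and decremented together on every region-level change. A minimal sketch of that pattern follows, assuming only java.util.concurrent.atomic.LongAdder, with names invented for the example.)

import java.util.concurrent.atomic.LongAdder;

final class GlobalMemStoreAccountingSketch {

  private final LongAdder globalDataSize = new LongAdder();
  private final LongAdder globalHeapSize = new LongAdder();

  void inc(long dataSize, long heapSize) {
    globalDataSize.add(dataSize);
    globalHeapSize.add(heapSize);
  }

  void dec(long dataSize, long heapSize) {
    globalDataSize.add(-dataSize);
    globalHeapSize.add(-heapSize);
  }

  long dataSize() { return globalDataSize.sum(); }

  long heapSize() { return globalHeapSize.sum(); }
}

LongAdder fits here because many handler threads update the global counters concurrently while exact reads are only needed occasionally; it gives much lower write contention than a single AtomicLong at the cost of a non-atomic sum().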
@@ -114,14 +114,14 @@ public class RegionServerAccounting { * @return the global Memstore data size in the RegionServer */ public long getGlobalMemStoreDataSize() { - return globalMemstoreDataSize.sum(); + return globalMemStoreDataSize.sum(); } /** * @return the global memstore heap size in the RegionServer */ public long getGlobalMemStoreHeapSize() { - return this.globalMemstoreHeapSize.sum(); + return this.globalMemStoreHeapSize.sum(); } /** @@ -129,13 +129,13 @@ public class RegionServerAccounting { * the global Memstore size */ public void incGlobalMemStoreSize(MemStoreSize memStoreSize) { - globalMemstoreDataSize.add(memStoreSize.getDataSize()); - globalMemstoreHeapSize.add(memStoreSize.getHeapSize()); + globalMemStoreDataSize.add(memStoreSize.getDataSize()); + globalMemStoreHeapSize.add(memStoreSize.getHeapSize()); } public void decGlobalMemStoreSize(MemStoreSize memStoreSize) { - globalMemstoreDataSize.add(-memStoreSize.getDataSize()); - globalMemstoreHeapSize.add(-memStoreSize.getHeapSize()); + globalMemStoreDataSize.add(-memStoreSize.getDataSize()); + globalMemStoreHeapSize.add(-memStoreSize.getHeapSize()); } /** @@ -203,7 +203,7 @@ public class RegionServerAccounting { if (memType == MemoryType.HEAP) { return (getGlobalMemStoreHeapSize()) * 1.0 / globalMemStoreLimitLowMark; } else { - return Math.max(getGlobalMemStoreDataSize() * 1.0 / globalMemStoreLimitLowMark, + return Math.max(getGlobalMemStoreHeapSize() * 1.0 / globalMemStoreLimitLowMark, getGlobalMemStoreHeapSize() * 1.0 / globalOnHeapMemstoreLimitLowMark); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java index 61815a540d..23c3a79597 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java @@ -66,7 +66,7 @@ public class RegionServicesForStores { } public void addMemStoreSize(MemStoreSize size) { - region.addAndGetMemStoreSize(size); + region.incMemStoreSize(size); } public RegionInfo getRegionInfo() { @@ -80,7 +80,7 @@ public class RegionServicesForStores { public ThreadPoolExecutor getInMemoryCompactionPool() { return INMEMORY_COMPACTION_POOL; } public long getMemStoreFlushSize() { - return region.getMemStoreFlushSize(); + return region.getMemStoreFlushHeapSize(); } public int getNumStores() { @@ -89,6 +89,6 @@ public class RegionServicesForStores { @VisibleForTesting long getMemStoreSize() { - return region.getMemStoreSize(); + return region.getMemStoreDataSize(); } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index 121cbcae62..83cd2912f5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -23,7 +23,6 @@ import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.SortedSet; -import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hbase.Cell; @@ -49,8 +48,8 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe public abstract class Segment { public final static long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT - + 6 * ClassSize.REFERENCE // 
cellSet, comparator, memStoreLAB, dataSize, - // heapSize, and timeRangeTracker + + 5 * ClassSize.REFERENCE // cellSet, comparator, memStoreLAB, memStoreSizing, + // and timeRangeTracker + Bytes.SIZEOF_LONG // minSequenceId + Bytes.SIZEOF_BOOLEAN); // tagsPresent public final static long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.ATOMIC_REFERENCE @@ -62,8 +61,7 @@ public abstract class Segment { private MemStoreLAB memStoreLAB; // Sum of sizes of all Cells added to this Segment. Cell's heapSize is considered. This is not // including the heap overhead of this class. - protected final AtomicLong dataSize; - protected final AtomicLong heapSize; + protected final MemStoreSizing segmentSize; protected final TimeRangeTracker timeRangeTracker; protected volatile boolean tagsPresent; @@ -71,8 +69,7 @@ public abstract class Segment { // and there is no need in true Segments state protected Segment(CellComparator comparator, TimeRangeTracker trt) { this.comparator = comparator; - this.dataSize = new AtomicLong(0); - this.heapSize = new AtomicLong(0); + this.segmentSize = new MemStoreSizing(); this.timeRangeTracker = trt; } @@ -82,8 +79,7 @@ public abstract class Segment { this.comparator = comparator; this.minSequenceId = Long.MAX_VALUE; this.memStoreLAB = memStoreLAB; - this.dataSize = new AtomicLong(0); - this.heapSize = new AtomicLong(0); + this.segmentSize = new MemStoreSizing(); this.tagsPresent = false; this.timeRangeTracker = trt; } @@ -93,8 +89,7 @@ public abstract class Segment { this.comparator = segment.getComparator(); this.minSequenceId = segment.getMinSequenceId(); this.memStoreLAB = segment.getMemStoreLAB(); - this.dataSize = new AtomicLong(segment.keySize()); - this.heapSize = new AtomicLong(segment.heapSize.get()); + this.segmentSize = new MemStoreSizing(segment.getMemStoreSize()); this.tagsPresent = segment.isTagsPresent(); this.timeRangeTracker = segment.getTimeRangeTracker(); } @@ -133,17 +128,6 @@ public abstract class Segment { return getCellSet().size(); } - /** - * @return the first cell in the segment that has equal or greater key than the given cell - */ - public Cell getFirstAfter(Cell cell) { - SortedSet snTailSet = tailSet(cell); - if (!snTailSet.isEmpty()) { - return snTailSet.first(); - } - return null; - } - /** * Closing a segment before it is being discarded */ @@ -210,18 +194,22 @@ public abstract class Segment { return this; } + public MemStoreSize getMemStoreSize() { + return this.segmentSize; + } + /** * @return Sum of all cell's size. */ public long keySize() { - return this.dataSize.get(); + return this.segmentSize.getDataSize(); } /** * @return The heap size of this segment. 
*/ public long heapSize() { - return this.heapSize.get(); + return this.segmentSize.getHeapSize(); } /** @@ -229,8 +217,9 @@ public abstract class Segment { */ //TODO protected void incSize(long delta, long heapOverhead) { - this.dataSize.addAndGet(delta); - this.heapSize.addAndGet(heapOverhead); + synchronized (this) { + this.segmentSize.incMemStoreSize(delta, heapOverhead); + } } public long getMinSequenceId() { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java index a690987c4f..fb968c38ec 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java @@ -94,7 +94,7 @@ public class TestGlobalMemStoreSize { long globalMemStoreSize = 0; for (RegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) { - globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemStoreSize(); + globalMemStoreSize += server.getRegion(regionInfo.getEncodedName()).getMemStoreDataSize(); } assertEquals(server.getRegionServerAccounting().getGlobalMemStoreDataSize(), globalMemStoreSize); @@ -125,7 +125,7 @@ public class TestGlobalMemStoreSize { for (RegionInfo regionInfo : ProtobufUtil.getOnlineRegions(null, server.getRSRpcServices())) { HRegion r = server.getRegion(regionInfo.getEncodedName()); - long l = r.getMemStoreSize(); + long l = r.getMemStoreDataSize(); if (l > 0) { // Only meta could have edits at this stage. Give it another flush // clear them. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java index dcccfd168b..2ecc0a640e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java @@ -229,30 +229,30 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase { ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-1"))) .join(); - Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0); // flush region and wait flush operation finished. LOG.info("flushing region: " + Bytes.toStringBinary(hri.getRegionName())); admin.flushRegion(hri.getRegionName()).get(); LOG.info("blocking until flush is complete: " + Bytes.toStringBinary(hri.getRegionName())); Threads.sleepWithoutInterrupt(500); - while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) { + while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0) { Threads.sleep(50); } // check the memstore. 
- Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0); + Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize(), 0); // write another put into the specific region ASYNC_CONN.getTable(tableName) .put(new Put(hri.getStartKey()).addColumn(FAMILY, FAMILY_0, Bytes.toBytes("value-2"))) .join(); - Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0); + Assert.assertTrue(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0); admin.flush(tableName).get(); Threads.sleepWithoutInterrupt(500); - while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize() > 0) { + while (regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize() > 0) { Threads.sleep(50); } // check the memstore. - Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreSize(), 0); + Assert.assertEquals(regionServer.getOnlineRegion(hri.getRegionName()).getMemStoreDataSize(), 0); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java index 62eb31674f..ccbfdf12b7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.backoff.ServerStatistics; import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; -import org.apache.hadoop.hbase.regionserver.MemStoreSize; import org.apache.hadoop.hbase.regionserver.Region; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; @@ -105,7 +104,7 @@ public class TestClientPushback { mutator.flush(); // get the current load on RS. 
Hopefully memstore isn't flushed since we wrote the the data - int load = (int) ((((HRegion) region).addAndGetMemStoreSize(new MemStoreSize(0, 0)) * 100) + int load = (int) ((((HRegion) region).getMemStoreDataSize() * 100) / flushSizeBytes); LOG.debug("Done writing some data to "+tableName); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java index 877265d29b..1d563e31d7 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemStoreSizeWithSlowCoprocessor.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.MemStoreSize; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; @@ -98,7 +97,7 @@ public class TestNegativeMemStoreSizeWithSlowCoprocessor { if (Bytes.equals(put.getRow(), Bytes.toBytes("row2"))) { region.flush(false); - Assert.assertTrue(region.addAndGetMemStoreSize(new MemStoreSize()) >= 0); + Assert.assertTrue(region.getMemStoreDataSize() >= 0); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 6fbf99adf6..7ac5a3d052 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -693,7 +693,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 4 cells. int oneCellOnCSLMHeapSize = 120; int oneCellOnCAHeapSize = 88; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 4 * oneCellOnCSLMHeapSize; assertEquals(totalHeapSize, ((CompactingMemStore)memstore).heapSize()); ((CompactingMemStore)memstore).flushInMemory(); // push keys to pipeline and compact @@ -774,7 +774,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { int totalCellsLen1 = addRowsByKeys(memstore, keys1);// Adding 3 cells. 
int oneCellOnCSLMHeapSize = 120; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); long totalHeapSize = MutableSegment.DEEP_OVERHEAD + 3 * oneCellOnCSLMHeapSize; assertEquals(totalHeapSize, memstore.heapSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java index 673acdd8c2..7686c5675e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellFlatMapMemStore.java @@ -214,7 +214,7 @@ public class TestCompactingToCellFlatMapMemStore extends TestCompactingMemStore long cellBeforeFlushSize = cellBeforeFlushSize(); long cellAfterFlushSize = cellAfterFlushSize(); long totalHeapSize1 = MutableSegment.DEEP_OVERHEAD + 4 * cellBeforeFlushSize; - assertEquals(totalCellsLen1, region.getMemStoreSize()); + assertEquals(totalCellsLen1, region.getMemStoreDataSize()); assertEquals(totalHeapSize1, ((CompactingMemStore) memstore).heapSize()); MemStoreSize size = memstore.getFlushableSize(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java index e5b8a6146a..c425ec8f7a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java @@ -314,7 +314,7 @@ public class TestEndToEndSplitTransaction { admin.flushRegion(regionName); log("blocking until flush is complete: " + Bytes.toStringBinary(regionName)); Threads.sleepWithoutInterrupt(500); - while (rs.getOnlineRegion(regionName).getMemStoreSize() > 0) { + while (rs.getOnlineRegion(regionName).getMemStoreDataSize() > 0) { Threads.sleep(50); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 0098091699..70b25940ce 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -308,7 +308,7 @@ public class TestHRegion { region.put(put); // Close with something in memstore and something in the snapshot. Make sure all is cleared. 
region.close(); - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -390,14 +390,14 @@ public class TestHRegion { HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES); HStore store = region.getStore(COLUMN_FAMILY_BYTES); - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); // Put one value byte [] value = Bytes.toBytes(method); Put put = new Put(value); put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value); region.put(put); - long onePutSize = region.getMemStoreSize(); + long onePutSize = region.getMemStoreDataSize(); assertTrue(onePutSize > 0); RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class); @@ -413,7 +413,7 @@ public class TestHRegion { } catch (IOException expected) { } long expectedSize = onePutSize * 2; - assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreSize()); + assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreDataSize()); assertEquals("flushable size should be incremented", expectedSize, store.getFlushableSize().getDataSize()); @@ -458,13 +458,13 @@ public class TestHRegion { // Initialize region region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - long size = region.getMemStoreSize(); + long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); // Put one item into memstore. Measure the size of one item in memstore. Put p1 = new Put(row); p1.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual1, 1, (byte[]) null)); region.put(p1); - final long sizeOfOnePut = region.getMemStoreSize(); + final long sizeOfOnePut = region.getMemStoreDataSize(); // Fail a flush which means the current memstore will hang out as memstore 'snapshot'. try { LOG.info("Flushing"); @@ -477,7 +477,7 @@ public class TestHRegion { // Make it so all writes succeed from here on out ffs.fault.set(false); // Check sizes. Should still be the one entry. - Assert.assertEquals(sizeOfOnePut, region.getMemStoreSize()); + Assert.assertEquals(sizeOfOnePut, region.getMemStoreDataSize()); // Now add two entries so that on this next flush that fails, we can see if we // subtract the right amount, the snapshot size only. Put p2 = new Put(row); @@ -485,13 +485,13 @@ public class TestHRegion { p2.add(new KeyValue(row, COLUMN_FAMILY_BYTES, qual3, 3, (byte[])null)); region.put(p2); long expectedSize = sizeOfOnePut * 3; - Assert.assertEquals(expectedSize, region.getMemStoreSize()); + Assert.assertEquals(expectedSize, region.getMemStoreDataSize()); // Do a successful flush. It will clear the snapshot only. Thats how flushes work. // If already a snapshot, we clear it else we move the memstore to be snapshot and flush // it region.flush(true); // Make sure our memory accounting is right. - Assert.assertEquals(sizeOfOnePut * 2, region.getMemStoreSize()); + Assert.assertEquals(sizeOfOnePut * 2, region.getMemStoreDataSize()); } finally { HBaseTestingUtility.closeRegionAndWAL(region); } @@ -523,7 +523,7 @@ public class TestHRegion { // Initialize region region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, wal, COLUMN_FAMILY_BYTES); - long size = region.getMemStoreSize(); + long size = region.getMemStoreDataSize(); Assert.assertEquals(0, size); // Put one item into memstore. Measure the size of one item in memstore. 
Put p1 = new Put(row); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java index 55b5e643f6..5cca94b55f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java @@ -363,7 +363,7 @@ public class TestHRegionReplayEvents { verifyData(secondaryRegion, 0, lastReplayed, cq, families); HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1")); long storeMemstoreSize = store.getMemStoreSize().getHeapSize(); - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); long storeFlushableSize = store.getFlushableSize().getHeapSize(); long storeSize = store.getSize(); long storeSizeUncompressed = store.getStoreSizeUncompressed(); @@ -392,7 +392,7 @@ public class TestHRegionReplayEvents { assertTrue(storeFlushableSize > newFlushableSize); // assert that the region memstore is smaller now - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(regionMemstoreSize > newRegionMemstoreSize); // assert that the store sizes are bigger @@ -462,7 +462,7 @@ public class TestHRegionReplayEvents { // first verify that everything is replayed and visible before flush event replay HStore store = secondaryRegion.getStore(Bytes.toBytes("cf1")); long storeMemstoreSize = store.getMemStoreSize().getHeapSize(); - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); long storeFlushableSize = store.getFlushableSize().getHeapSize(); if (flushDesc.getAction() == FlushAction.START_FLUSH) { @@ -502,7 +502,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); // Test case 2: replay a flush start marker with a smaller seqId @@ -515,7 +515,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); // Test case 3: replay a flush start marker with a larger seqId @@ -528,7 +528,7 @@ public class TestHRegionReplayEvents { assertNotNull(secondaryRegion.getPrepareFlushResult()); assertEquals(secondaryRegion.getPrepareFlushResult().flushOpSeqId, startFlushDesc.getFlushSequenceNumber()); - assertTrue(secondaryRegion.getMemStoreSize() > 0); // memstore is not empty + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); // memstore is not empty verifyData(secondaryRegion, 0, numRows, cq, families); LOG.info("-- Verifying edits from secondary"); @@ -597,7 +597,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, 
s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay the a flush commit marker smaller than what we have prepared LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" @@ -617,7 +617,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped // assert that the region memstore is same as before - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertEquals(regionMemstoreSize, newRegionMemstoreSize); assertNotNull(secondaryRegion.getPrepareFlushResult()); // not dropped @@ -687,7 +687,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay the a flush commit marker larger than what we have prepared LOG.info("Testing replaying flush COMMIT " + commitFlushDesc + " on top of flush START" @@ -707,7 +707,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize > 0); // assert that the memstore is not dropped // assert that the region memstore is smaller than before, but not empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize > 0); assertTrue(regionMemstoreSize > newRegionMemstoreSize); @@ -788,7 +788,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); // Test case 1: replay a flush commit marker without start flush marker assertNull(secondaryRegion.getPrepareFlushResult()); @@ -817,7 +817,7 @@ public class TestHRegionReplayEvents { } // assert that the region memstore is same as before (we could not drop) - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); if (droppableMemstore) { assertTrue(0 == newRegionMemstoreSize); } else { @@ -887,7 +887,7 @@ public class TestHRegionReplayEvents { for (HStore s : secondaryRegion.getStores()) { assertEquals(expectedStoreFileCount, s.getStorefilesCount()); } - long regionMemstoreSize = secondaryRegion.getMemStoreSize(); + long regionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(regionMemstoreSize == 0); // now replay the region open event that should contain new file locations @@ -904,7 +904,7 @@ public class TestHRegionReplayEvents { assertTrue(newFlushableSize == MutableSegment.DEEP_OVERHEAD); // assert that the region memstore is empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize == 0); assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any @@ -983,7 +983,7 @@ public class TestHRegionReplayEvents { assertTrue(newSnapshotSize.getDataSize() == 0); // assert that the region memstore is empty - long newRegionMemstoreSize = secondaryRegion.getMemStoreSize(); + long 
newRegionMemstoreSize = secondaryRegion.getMemStoreDataSize(); assertTrue(newRegionMemstoreSize == 0); assertNull(secondaryRegion.getPrepareFlushResult()); //prepare snapshot should be dropped if any @@ -1431,7 +1431,7 @@ public class TestHRegionReplayEvents { LOG.info("-- Replaying edits in secondary"); // Test case 4: replay some edits, ensure that memstore is dropped. - assertTrue(secondaryRegion.getMemStoreSize() == 0); + assertTrue(secondaryRegion.getMemStoreDataSize() == 0); putDataWithFlushes(primaryRegion, 400, 400, 0); numRows = 400; @@ -1449,11 +1449,11 @@ public class TestHRegionReplayEvents { } } - assertTrue(secondaryRegion.getMemStoreSize() > 0); + assertTrue(secondaryRegion.getMemStoreDataSize() > 0); secondaryRegion.refreshStoreFiles(); - assertTrue(secondaryRegion.getMemStoreSize() == 0); + assertTrue(secondaryRegion.getMemStoreDataSize() == 0); LOG.info("-- Verifying edits from primary"); verifyData(primaryRegion, 0, numRows, cq, families); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index cf1c104105..a2ea27ae18 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -143,7 +143,7 @@ public class TestPerColumnFamilyFlush { } } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1 = region.getOldestSeqIdOfStore(FAMILY1); @@ -186,7 +186,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); smallestSeqInRegionCurrentMemstore = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -224,7 +224,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); smallestSeqInRegionCurrentMemstore = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -260,7 +260,7 @@ public class TestPerColumnFamilyFlush { // Since we won't find any CF above the threshold, and hence no specific // store to flush, we should flush all the memstores. - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -284,7 +284,7 @@ public class TestPerColumnFamilyFlush { } } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. 
MemStoreSize cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); @@ -307,7 +307,7 @@ public class TestPerColumnFamilyFlush { cf1MemstoreSize = region.getStore(FAMILY1).getMemStoreSize(); cf2MemstoreSize = region.getStore(FAMILY2).getMemStoreSize(); cf3MemstoreSize = region.getStore(FAMILY3).getMemStoreSize(); - totalMemstoreSize = region.getMemStoreSize(); + totalMemstoreSize = region.getMemStoreDataSize(); long smallestSeqInRegionCurrentMemstore = region.getWAL().getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); @@ -376,7 +376,7 @@ public class TestPerColumnFamilyFlush { long totalMemstoreSize; long cf1MemstoreSize, cf2MemstoreSize, cf3MemstoreSize; - totalMemstoreSize = desiredRegion.getMemStoreSize(); + totalMemstoreSize = desiredRegion.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. cf1MemstoreSize = desiredRegion.getStore(FAMILY1).getMemStoreSize().getDataSize(); @@ -499,12 +499,12 @@ public class TestPerColumnFamilyFlush { @Override public boolean evaluate() throws Exception { - return desiredRegion.getMemStoreSize() == 0; + return desiredRegion.getMemStoreDataSize() == 0; } @Override public String explainFailure() throws Exception { - long memstoreSize = desiredRegion.getMemStoreSize(); + long memstoreSize = desiredRegion.getMemStoreDataSize(); if (memstoreSize > 0) { return "Still have unflushed entries in memstore, memstore size is " + memstoreSize; } @@ -546,7 +546,7 @@ public class TestPerColumnFamilyFlush { put.addColumn(FAMILY3, qf, value3); table.put(put); // slow down to let regionserver flush region. - while (region.getMemStoreSize() > memstoreFlushSize) { + while (region.getMemStoreHeapSize() > memstoreFlushSize) { Thread.sleep(100); } } @@ -619,7 +619,6 @@ public class TestPerColumnFamilyFlush { } finally { TEST_UTIL.shutdownMiniCluster(); } - LOG.info("disable selective flush: " + Bytes.toString(FAMILY1) + "=>" + cf1StoreFileCount + ", " + Bytes.toString(FAMILY2) + "=>" + cf2StoreFileCount + ", " + Bytes.toString(FAMILY3) + "=>" + cf3StoreFileCount); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java index 0122674ce1..aea3ba1514 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerAccounting.java @@ -37,7 +37,7 @@ public class TestRegionServerAccounting { // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -50,7 +50,7 @@ public class TestRegionServerAccounting { // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK, 
regionServerAccounting.isAboveLowWaterMark()); @@ -60,12 +60,12 @@ public class TestRegionServerAccounting { public void testOffheapMemstoreHigherWaterMarkLimitsDueToDataSize() { Configuration conf = HBaseConfiguration.create(); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach offheap limit as data size is higher and not due to heap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L * 1024L), 0); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_OFFHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -76,12 +76,12 @@ public class TestRegionServerAccounting { Configuration conf = HBaseConfiguration.create(); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach higher limit as heap size is higher and not due to offheap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l), (long) (2l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L), (2L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_HIGHER_MARK, regionServerAccounting.isAboveHighWaterMark()); @@ -91,12 +91,12 @@ public class TestRegionServerAccounting { public void testOffheapMemstoreLowerWaterMarkLimitsDueToDataSize() { Configuration conf = HBaseConfiguration.create(); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach offheap limit as data size is higher and not due to heap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l * 1024l), (long) (1l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L * 1024L), (1L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_OFFHEAP_LOWER_MARK, regionServerAccounting.isAboveLowWaterMark()); @@ -107,12 +107,12 @@ public class TestRegionServerAccounting { Configuration conf = HBaseConfiguration.create(); conf.setFloat(MemorySizeUtil.MEMSTORE_SIZE_KEY, 0.2f); // setting 1G as offheap data size - conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1l * 1024l)); + conf.setLong(MemorySizeUtil.OFFHEAP_MEMSTORE_SIZE_KEY, (1L * 1024L)); // try for default cases RegionServerAccounting regionServerAccounting = new RegionServerAccounting(conf); // this will breach higher limit as heap size is higher and not due to offheap size MemStoreSize memstoreSize = - new MemStoreSize((long) (3l * 1024l * 1024l), (long) (2l * 1024l * 1024l * 1024l)); + new MemStoreSize((3L * 1024L * 1024L), (2L * 1024L * 1024L * 1024L)); regionServerAccounting.incGlobalMemStoreSize(memstoreSize); assertEquals(FlushType.ABOVE_ONHEAP_LOWER_MARK, 
regionServerAccounting.isAboveLowWaterMark()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java index 62d22d201b..b6205ed12e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java @@ -260,8 +260,8 @@ public class TestWALLockup { Thread t = new Thread ("Flusher") { public void run() { try { - if (region.getMemStoreSize() <= 0) { - throw new IOException("memstore size=" + region.getMemStoreSize()); + if (region.getMemStoreDataSize() <= 0) { + throw new IOException("memstore size=" + region.getMemStoreDataSize()); } region.flush(false); } catch (IOException e) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 6a647964db..acac984148 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -161,7 +161,7 @@ public class TestWalAndCompactingMemStoreFlush { region.put(createPut(2, i)); } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1PhaseI = region.getOldestSeqIdOfStore(FAMILY1); @@ -352,13 +352,13 @@ public class TestWalAndCompactingMemStoreFlush { s = s + "----AFTER THIRD AND FORTH FLUSH, The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseV + ". After additional inserts and last flush, the entire region size is:" + region - .getMemStoreSize() + .getMemStoreDataSize() + "\n----------------------------------\n"; // Since we won't find any CF above the threshold, and hence no specific // store to flush, we should flush all the memstores // Also compacted memstores are flushed to disk. - assertEquals(0, region.getMemStoreSize()); + assertEquals(0, region.getMemStoreDataSize()); System.out.println(s); HBaseTestingUtility.closeRegionAndWAL(region); } @@ -404,7 +404,7 @@ public class TestWalAndCompactingMemStoreFlush { /*------------------------------------------------------------------------------*/ /*------------------------------------------------------------------------------*/ /* PHASE I - collect sizes */ - long totalMemstoreSizePhaseI = region.getMemStoreSize(); + long totalMemstoreSizePhaseI = region.getMemStoreDataSize(); // Find the smallest LSNs for edits wrt to each CF. long smallestSeqCF1PhaseI = region.getOldestSeqIdOfStore(FAMILY1); long smallestSeqCF2PhaseI = region.getOldestSeqIdOfStore(FAMILY2); @@ -467,7 +467,7 @@ public class TestWalAndCompactingMemStoreFlush { .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); // Find the smallest LSNs for edits wrt to each CF. 
long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3); - long totalMemstoreSizePhaseII = region.getMemStoreSize(); + long totalMemstoreSizePhaseII = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE II - validation */ @@ -510,7 +510,7 @@ public class TestWalAndCompactingMemStoreFlush { /* PHASE III - collect sizes */ // How much does the CF1 memstore occupy now? Will be used later. MemStoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getMemStoreSize(); - long totalMemstoreSizePhaseIII = region.getMemStoreSize(); + long totalMemstoreSizePhaseIII = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE III - validation */ @@ -568,7 +568,7 @@ public class TestWalAndCompactingMemStoreFlush { MemStoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getMemStoreSize(); long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region) .getEarliestMemStoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes()); - long totalMemstoreSizePhaseV = region.getMemStoreSize(); + long totalMemstoreSizePhaseV = region.getMemStoreDataSize(); /*------------------------------------------------------------------------------*/ /* PHASE V - validation */ @@ -660,7 +660,7 @@ public class TestWalAndCompactingMemStoreFlush { ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).setCompositeSnapshot(false); ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).setCompositeSnapshot(false); - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. MemStoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); @@ -795,7 +795,7 @@ public class TestWalAndCompactingMemStoreFlush { region.put(createPut(2, i)); } - long totalMemstoreSize = region.getMemStoreSize(); + long totalMemstoreSize = region.getMemStoreDataSize(); // Find the sizes of the memstores of each CF. MemStoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getMemStoreSize(); -- 2.14.3 (Apple Git-98)