From 7527e78e497e8688ce72b787f51f68c47c6f196a Mon Sep 17 00:00:00 2001 From: Michael Stack Date: Wed, 9 May 2018 14:13:03 -0700 Subject: [PATCH] HBASE-20483 tests --- hbase-server/pom.xml | 10 ++ .../org/apache/hadoop/hbase/io/hfile/HFile.java | 4 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 46 ++++-- .../apache/hadoop/hbase/regionserver/HStore.java | 4 +- .../hadoop/hbase/regionserver/SegmentScanner.java | 11 ++ .../org/apache/hadoop/hbase/wal/WALFactory.java | 6 +- .../hbase/regionserver/TestFlushPerformance.java | 169 +++++++++++++++++++++ .../regionserver/TestMemstorePerformance.java | 90 +++++++++++ 8 files changed, 322 insertions(+), 18 deletions(-) create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushPerformance.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstorePerformance.java diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 28e6f34b39..3a89e450a2 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -576,6 +576,16 @@ mockito-core test + + org.mockito + mockito-core + compile + + + org.mockito + mockito-core + compile + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java index 4c9f6f6d65..5bcaa172d1 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java @@ -332,8 +332,8 @@ public class HFile { try { ostream.setDropBehind(shouldDropBehind && cacheConf.shouldDropBehindCompaction()); } catch (UnsupportedOperationException uoe) { - if (LOG.isTraceEnabled()) LOG.trace("Unable to set drop behind on " + path, uoe); - else if (LOG.isDebugEnabled()) LOG.debug("Unable to set drop behind on " + path); + LOG.trace("Unable to set drop behind on {}", path, uoe); + LOG.debug("Unable to set drop behind on {}", path.getName()); } } return new HFileWriterImpl(conf, cacheConf, path, ostream, comparator, fileContext); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index f41724719e..ce89f2793f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -27,6 +27,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InterruptedIOException; import java.lang.reflect.Constructor; +import java.net.InetSocketAddress; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.text.ParseException; @@ -75,17 +76,21 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellBuilderType; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.CellUtil; +import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.CompareOperator; import org.apache.hadoop.hbase.CompoundConfiguration; +import org.apache.hadoop.hbase.CoordinatedStateManager; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.DroppedSnapshotException; 
import org.apache.hadoop.hbase.ExtendedCellBuilderFactory; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HConstants.OperationStatusCode; import org.apache.hadoop.hbase.HDFSBlocksDistribution; @@ -95,13 +100,17 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.RegionTooBusyException; +import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagUtil; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.ClusterConnection; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; import org.apache.hadoop.hbase.client.CompactionState; +import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Get; @@ -118,6 +127,7 @@ import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.client.locking.EntityLock; import org.apache.hadoop.hbase.conf.ConfigurationManager; import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver; import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType; @@ -136,18 +146,23 @@ import org.apache.hadoop.hbase.ipc.CallerDisconnectedException; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcCall; import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.ipc.RpcServerInterface; import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.monitoring.TaskMonitor; +import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; +import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry; import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState; import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker; +import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester; import org.apache.hadoop.hbase.regionserver.throttle.CompactionThroughputControllerFactory; import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController; import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; import org.apache.hadoop.hbase.regionserver.wal.WALUtil; import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; import org.apache.hadoop.hbase.snapshot.SnapshotManifest; import org.apache.hadoop.hbase.trace.TraceUtil; @@ -171,10 +186,13 @@ import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALSplitter; import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay; +import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.io.MultipleIOException; import org.apache.hadoop.util.StringUtils; import org.apache.htrace.core.TraceScope; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -287,7 +305,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi private Map coprocessorServiceHandlers = Maps.newHashMap(); // Track data size in all memstores - private final MemStoreSizing memStoreSize = new MemStoreSizing(); + @VisibleForTesting + final MemStoreSizing memStoreSize = new MemStoreSizing(); private final RegionServicesForStores regionServicesForStores = new RegionServicesForStores(this); // Debug possible data loss due to WAL off @@ -2522,9 +2541,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi .append(StringUtils.byteDesc(store.getFlushableSize().getDataSize())); } } - LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() + " column families," + - " memstore data size=" + StringUtils.byteDesc(this.memStoreSize.getDataSize()) + - " memstore heap size=" + StringUtils.byteDesc(this.memStoreSize.getHeapSize()) + + LOG.info("Flushing " + + storesToFlush.size() + "/" + stores.size() + " column family(s)," + + " data size=" + StringUtils.byteDesc(this.memStoreSize.getDataSize()) + + ", heap size=" + StringUtils.byteDesc(this.memStoreSize.getHeapSize()) + ((perCfExtras != null && perCfExtras.length() > 0)? perCfExtras.toString(): "") + ((wal != null) ? "" : "; WAL is null, using passed sequenceid=" + sequenceId)); } @@ -2714,10 +2733,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi long flushableDataSize = prepareResult.totalFlushableSize.getDataSize(); long flushableHeapSize = prepareResult.totalFlushableSize.getHeapSize(); long memstoresize = this.memStoreSize.getDataSize(); - String msg = "Finished memstore flush;" - + " data size ~" + StringUtils.byteDesc(flushableDataSize) + "/" + flushableDataSize - + ", heap size ~" + StringUtils.byteDesc(flushableHeapSize) + "/" + flushableHeapSize - + ", currentsize=" + StringUtils.byteDesc(memstoresize) + "/" + memstoresize + String msg = "Finished flush of " + + " data=" + StringUtils.byteDesc(flushableDataSize) + "/" + flushableDataSize + + ", heap=" + StringUtils.byteDesc(flushableHeapSize) + "/" + flushableHeapSize + + ", current data=" + StringUtils.byteDesc(memstoresize) + "/" + memstoresize + " for " + this.getRegionInfo().getEncodedName() + " in " + time + "ms, sequenceid=" + flushOpSeqId + ", compaction requested=" + compactionRequested + ((wal == null) ? "; wal=null" : ""); @@ -4184,12 +4203,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi requestFlush(); // Don't print current limit because it will vary too much. The message is used as a key // over in RetriesExhaustedWithDetailsException processing. - throw new RegionTooBusyException("Over memstore limit; regionName=" + + throw new RegionTooBusyException("Over memstore limit=" + + org.apache.hadoop.hbase.procedure2.util.StringUtils.humanSize(this.blockingMemStoreSize) + + ", regionName=" + (this.getRegionInfo() == null? "unknown": this.getRegionInfo().getEncodedName()) + - ", server=" + (this.getRegionServerServices() == null ? 
"unknown": - this.getRegionServerServices().getServerName()) + - ", blockingMemStoreSize=" + - org.apache.hadoop.hbase.procedure2.util.StringUtils.humanSize(blockingMemStoreSize)); + ", server=" + (this.getRegionServerServices() == null? "unknown": + this.getRegionServerServices().getServerName())); } } @@ -8494,4 +8513,5 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi public void requestFlush(FlushLifeCycleTracker tracker) throws IOException { requestFlush0(tracker); } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 351a84704e..b688c47a34 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -139,7 +139,9 @@ public class HStore implements Store, HeapSize, StoreConfigInformation, Propagat private static final Logger LOG = LoggerFactory.getLogger(HStore.class); - protected final MemStore memstore; + @VisibleForTesting + final MemStore memstore; + // This stores directory in the filesystem. protected final HRegion region; private final ColumnFamilyDescriptor family; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java index a8b0d3d501..417083f11b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java @@ -162,9 +162,20 @@ public class SegmentScanner implements KeyValueScanner { get it. So we remember the last keys we iterated to and restore the reseeked set to at least that point. */ + /* iter = getIterator(getHighest(cell, last)); updateCurrent(); return (current != null); + */ + while(iter.hasNext()){ + Cell next = iter.next(); + int ret = segment.compare(next, cell); + if(ret >= 0){ + current = next; + return true; + } + } + return false; } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java index 1410b532e0..98867a89e8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java @@ -47,9 +47,11 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti * implementations: *
  * <ul>
  *   <li><em>defaultProvider</em> : whatever provider is standard for the hbase version. Currently
- *                  "filesystem"</li>
+ *                  "asyncfs"</li>
+ *   <li><em>asyncfs</em> : a provider that will run on top of an implementation of the Hadoop
+ *                          FileSystem interface via an asynchronous client.</li>
  *   <li><em>filesystem</em> : a provider that will run on top of an implementation of the Hadoop
- *                             FileSystem interface, normally HDFS.</li>
+ *                             FileSystem interface via HDFS's synchronous DFSClient.</li>
  *   <li><em>multiwal</em> : a provider that will use multiple "filesystem" wal instances per region
  *                           server.</li>
  * </ul>
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushPerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushPerformance.java new file mode 100644 index 0000000000..ce268dc2da --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushPerformance.java @@ -0,0 +1,169 @@ +package org.apache.hadoop.hbase.regionserver; + +import com.sun.org.apache.xpath.internal.axes.WalkerFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Random; + +import static org.junit.Assert.assertTrue; + + +/** + * Test with a main that is about loading a region and then re-using the load just to profile + * region flush rate. Needed because hbase2 is slower than hbase1 flushing. + */ +@Category(LargeTests.class) +public class TestFlushPerformance { + @Rule public TestName name = new TestName(); + + private static final Logger LOG = LoggerFactory.getLogger(TestFlushPerformance.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static WALFactory WALFACTORY; + private static final Random RANDOM = new Random(); + private static final String FAMILY_NAME = "cf"; + private static final byte [] FAMILY_NAME_BYTES = Bytes.toBytes(FAMILY_NAME); + private static final int TOTAL_ROWS = 400000; + private static final int COLUMNS = 10; + private static final byte [] VALUE = Bytes.toBytes("Some old value"); + + @BeforeClass + public static void beforeClass() throws IOException { + // Make it so we use the DisableWALProvider; i.e. no WAL + // TODO: Doc. how to get a disabled WAL. Too hard to figure currently. 
+ Configuration conf = TEST_UTIL.getConfiguration(); + conf.setBoolean("hbase.regionserver.hlog.enabled", false); + assertTrue(!conf.getBoolean("hbase.regionserver.hlog.enabled", true)); + WALFACTORY = new WALFactory(conf, TestFlushPerformance.class.getSimpleName()); + ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, + 0, 0, null); + } + + @Test + public void testFlush() throws IOException { + HRegion region = getRegion(WALFACTORY, + TEST_UTIL.getConfiguration(), this.name.getMethodName()); + load(region, TOTAL_ROWS); + long dataSize = region.memStoreSize.getDataSize(); + long heapSize = region.memStoreSize.getHeapSize(); + try { + for (;;) { + MutableSegment activeReference = + ((AbstractMemStore) region.getStore(FAMILY_NAME_BYTES).memstore).active; + HRegion.FlushResult flushResult = region.flush(true); + assertTrue(flushResult.isFlushSucceeded()); + // Put the reference back in place to see if we can flush again. Restore datasize too. + ((AbstractMemStore)region.getStore(FAMILY_NAME_BYTES).memstore).active = activeReference; + region.memStoreSize.dataSize = dataSize; + region.memStoreSize.heapSize = heapSize; + } + } finally { + region.close(); + } + } + + static HRegion getRegion(WALFactory walFactory, + Configuration conf, String tableName) throws IOException { + HColumnDescriptor hcd = new HColumnDescriptor(FAMILY_NAME); + HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); + htd.setMemStoreFlushSize(Long.MAX_VALUE/1024/*Big number to stop auto-flushing!*/); + htd.addFamily(hcd); + RegionServerServices rss = Mockito.mock(RegionServerServices.class); + Mockito.when(rss.getServerName()).thenReturn(ServerName.valueOf("serverName", 0, 0)); + FlushRequester flushRequester = Mockito.mock(FlushRequester.class); + Mockito.when(rss.getFlushRequester()).thenReturn(flushRequester); + HRegionInfo ri = new HRegionInfo(htd.getTableName()); + Path tableDir = TEST_UTIL.getDataTestDir(); + HRegionFileSystem rfs = new HRegionFileSystem(conf, FileSystem.get(conf), tableDir, ri); + WAL wal = walFactory.getWAL(ri); + HRegion region = new HRegion(rfs, wal, conf, htd, rss); + CancelableProgressable cancelableProgressable = Mockito.mock(CancelableProgressable.class); + return region.openHRegion(cancelableProgressable); + } + + static void load(HRegion region, int totalRows) throws IOException { + for (int i = 0; i < totalRows; i++) { + Put put = new Put(getRandomRow(totalRows)); + for (int column = 0; column < COLUMNS; column++) { + byte[] qualifier = Bytes.toBytes("" + column + 1); + put.addColumn(FAMILY_NAME_BYTES, qualifier, VALUE); + } + region.put(put); + } + } + + // Below methods are stolen from PerformanceEvaluation. TODO: Move into hbase-common. + + private static byte [] getRandomRow(final int totalRows) { + return format(generateRandomRow(totalRows)); + } + + private static byte [] format(final int number) { + byte [] b = new byte[26]; + int d = Math.abs(number); + for (int i = b.length - 1; i >= 0; i--) { + b[i] = (byte)((d % 10) + '0'); + d /= 10; + } + return b; + } + + private static int generateRandomRow(int totalRows) { + return RANDOM.nextInt(Integer.MAX_VALUE) % totalRows; + } + + /* + * This method takes some time and is done inline uploading data. For + * example, doing the mapfile test, generation of the key and value + * consumes about 30% of CPU time. + * @return Generated random value to insert into a table cell. 
+ */ + private static byte[] generateData(int length) { + byte [] b = new byte [length]; + int i; + + for(i = 0; i < (length-8); i += 8) { + b[i] = (byte) (65 + RANDOM.nextInt(26)); + b[i+1] = b[i]; + b[i+2] = b[i]; + b[i+3] = b[i]; + b[i+4] = b[i]; + b[i+5] = b[i]; + b[i+6] = b[i]; + b[i+7] = b[i]; + } + + byte a = (byte) (65 + RANDOM.nextInt(26)); + for(; i < length; i++) { + b[i] = a; + } + return b; + } + + public static void main(String args[]) { + org.junit.runner.JUnitCore.main(TestFlushPerformance.class.getName()); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstorePerformance.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstorePerformance.java new file mode 100644 index 0000000000..559f3ba1f9 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemstorePerformance.java @@ -0,0 +1,90 @@ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.CancelableProgressable; +import org.apache.hadoop.hbase.wal.WAL; +import org.apache.hadoop.hbase.wal.WALFactory; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; +import org.mockito.Mockito; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Random; + +import static org.junit.Assert.assertTrue; + + +/** + * Test with a main that is about loading a region memstore over and over again. + * Needed because hbase2 is slower than hbase1 flushing. + * @see TestFlushPerformance + */ +@Category(LargeTests.class) +public class TestMemstorePerformance { + @Rule public TestName name = new TestName(); + + private static final Logger LOG = LoggerFactory.getLogger(TestMemstorePerformance.class); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static WALFactory WALFACTORY; + private static final Random RANDOM = new Random(); + private static final String FAMILY_NAME = "cf"; + private static final byte [] FAMILY_NAME_BYTES = Bytes.toBytes(FAMILY_NAME); + private static final int TOTAL_ROWS = 400000; + private static final int COLUMNS = 10; + private static final byte [] VALUE = Bytes.toBytes("Some old value"); + + @BeforeClass + public static void beforeClass() throws IOException { + // Make it so we use the DisableWALProvider; i.e. no WAL + // TODO: Doc. how to get a disabled WAL. Too hard to figure currently. 
+ Configuration conf = TEST_UTIL.getConfiguration(); + conf.setBoolean("hbase.regionserver.hlog.enabled", false); + assertTrue(!conf.getBoolean("hbase.regionserver.hlog.enabled", true)); + WALFACTORY = new WALFactory(conf, TestMemstorePerformance.class.getSimpleName()); + ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, + 0, 0, null); + } + + @Test + public void testMemstore() throws IOException { + HRegion region = TestFlushPerformance.getRegion(WALFACTORY, TEST_UTIL.getConfiguration(), + this.name.getMethodName()); + try { + // Cycle filling the memstore, snapshotting, dropping the snapshot and so on. + for (;;) { + TestFlushPerformance.load(region, TOTAL_ROWS); + long dataSize = region.getMemStoreDataSize(); + MemStoreSnapshot mss = + ((DefaultMemStore)((HStore)region.getStore(FAMILY_NAME_BYTES)).memstore).snapshot(); + MemStoreSize size = ((HStore)region.getStore(FAMILY_NAME_BYTES)).getSnapshotSize(); + ((DefaultMemStore)((HStore)region.getStore(FAMILY_NAME_BYTES)).memstore). + clearSnapshot(mss.getId()); + region.decrMemStoreSize(size); + LOG.info("dataSize={}, snapshotSize={}, currentSize={}", dataSize, size, + region.getMemStoreDataSize()); + } + } finally { + region.close(); + } + } + + public static void main(String args[]) { + org.junit.runner.JUnitCore.main(TestMemstorePerformance.class.getName()); + } +} -- 2.16.3