From 8f40f6aaaf7bd4cd1a13b0b4763820dff6b50ecb Mon Sep 17 00:00:00 2001
From: eshcar
Date: Tue, 13 Dec 2016 23:54:12 +0200
Subject: [PATCH] HBASE-17294: External configuration for memory compaction

---
 .../org/apache/hadoop/hbase/HColumnDescriptor.java | 47 +++++++++++++++-------
 .../hbase/regionserver/CompactingMemStore.java     | 15 +++++--
 .../apache/hadoop/hbase/regionserver/HStore.java   | 22 +++++++---
 .../hbase/regionserver/MemStoreCompactor.java      | 29 +++++--------
 .../apache/hadoop/hbase/HBaseTestingUtility.java   |  4 +-
 .../apache/hadoop/hbase/PerformanceEvaluation.java | 16 ++++----
 .../apache/hadoop/hbase/TestAcidGuarantees.java    |  3 ++
 .../org/apache/hadoop/hbase/TestIOFencing.java     |  3 ++
 .../hadoop/hbase/backup/TestHFileArchiving.java    |  5 +++
 .../hbase/client/TestMobSnapshotFromClient.java    |  4 ++
 .../client/TestSnapshotCloneIndependence.java      |  3 ++
 .../hadoop/hbase/master/TestTableLockManager.java  |  9 +++++
 .../TestMasterProcedureSchedulerConcurrency.java   |  4 ++
 .../hbase/regionserver/TestCompactingMemStore.java | 29 ++++++++-----
 .../TestCompactingToCellArrayMapMemStore.java      | 13 +++---
 .../hbase/regionserver/TestMajorCompaction.java    |  3 ++
 .../regionserver/TestPerColumnFamilyFlush.java     | 11 ++++-
 .../hbase/regionserver/TestRecoveredEdits.java     |  2 +
 .../TestWalAndCompactingMemStoreFlush.java         | 26 ++++++++----
 .../regionserver/wal/AbstractTestLogRolling.java   | 22 +++++-----
 .../regionserver/wal/AbstractTestWALReplay.java    |  3 ++
 .../regionserver/wal/TestAsyncLogRolling.java      |  2 +
 .../hbase/regionserver/wal/TestLogRolling.java     | 14 ++++---
 .../snapshot/TestFlushSnapshotFromClient.java      |  6 ++-
 hbase-shell/src/main/ruby/hbase/admin.rb           |  2 +-
 25 files changed, 205 insertions(+), 92 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 3d7b145..acbc609 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -66,6 +66,32 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
 
   public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
 
+  /**
+   * Enum describing all possible memory compaction policies
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public enum MemoryCompaction {
+    /**
+     * No memory compaction; when the size threshold is exceeded, data is flushed to disk
+     */
+    NONE,
+    /**
+     * Basic policy applies optimizations which modify the index to a more compacted
+     * representation. This is beneficial in all access patterns. The smaller the cells are,
+     * the greater the benefit of this policy.
+     * This is the default policy.
+     */
+    BASIC,
+    /**
+     * In addition to compacting the index representation as the basic policy does, the eager
+     * policy eliminates duplication while the data is still in memory (much like the
+     * on-disk compaction does after the data is flushed to disk). This policy is most useful
+     * for applications with high data churn or small working sets.
+     */
+    EAGER
+  }
+
   // These constants are used as FileInfo keys
   public static final String COMPRESSION = "COMPRESSION";
   public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT";
@@ -174,11 +200,6 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   public static final boolean DEFAULT_IN_MEMORY = false;
 
   /**
-   * Default setting for whether to set the memstore of this column family as compacting or not.
-   */
-  public static final boolean DEFAULT_IN_MEMORY_COMPACTION = false;
-
-  /**
    * Default setting for preventing deleted from being collected immediately.
    */
   public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE;
@@ -263,7 +284,6 @@
     DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL));
     DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE));
     DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY));
-    DEFAULT_VALUES.put(IN_MEMORY_COMPACTION, String.valueOf(DEFAULT_IN_MEMORY_COMPACTION));
     DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE));
     DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED));
     DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING));
@@ -329,7 +349,6 @@
     setMinVersions(DEFAULT_MIN_VERSIONS);
     setKeepDeletedCells(DEFAULT_KEEP_DELETED);
     setInMemory(DEFAULT_IN_MEMORY);
-    setInMemoryCompaction(DEFAULT_IN_MEMORY_COMPACTION);
     setBlockCacheEnabled(DEFAULT_BLOCKCACHE);
     setTimeToLive(DEFAULT_TTL);
     setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase(Locale.ROOT)));
@@ -688,24 +707,24 @@
   }
 
   /**
-   * @return True if we prefer to keep the in-memory data compacted
+   * @return the in-memory compaction policy if set, or null if no policy is set
    * for this column family
    */
-  public boolean isInMemoryCompaction() {
+  public MemoryCompaction getInMemoryCompaction() {
     String value = getValue(IN_MEMORY_COMPACTION);
     if (value != null) {
-      return Boolean.parseBoolean(value);
+      return MemoryCompaction.valueOf(value);
     }
-    return DEFAULT_IN_MEMORY_COMPACTION;
+    return null;
   }
 
   /**
-   * @param inMemoryCompaction True if we prefer to keep the in-memory data compacted
+   * @param inMemoryCompaction the preferred in-memory compaction policy
    * for this column family
    * @return this (for chained invocation)
    */
-  public HColumnDescriptor setInMemoryCompaction(boolean inMemoryCompaction) {
-    return setValue(IN_MEMORY_COMPACTION, Boolean.toString(inMemoryCompaction));
+  public HColumnDescriptor setInMemoryCompaction(MemoryCompaction inMemoryCompaction) {
+    return setValue(IN_MEMORY_COMPACTION, inMemoryCompaction.toString());
   }
 
   public KeepDeletedCells getKeepDeletedCells() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index a7eb19e..d279f4a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -51,6 +52,11 @@ import org.apache.hadoop.hbase.wal.WAL;
 @InterfaceAudience.Private
 public class CompactingMemStore extends AbstractMemStore {
 
+  // The external setting of the compacting MemStore behaviour
+  public static final String COMPACTING_MEMSTORE_TYPE_KEY =
+      "hbase.hregion.compacting.memstore.type";
+  public static final String COMPACTING_MEMSTORE_TYPE_DEFAULT =
+      String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC);
   // Default fraction of in-memory-flush size w.r.t. flush-to-disk size
   public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY =
       "hbase.memstore.inmemoryflush.threshold.factor";
@@ -75,12 +81,13 @@
       + CompactionPipeline.DEEP_OVERHEAD + MemStoreCompactor.DEEP_OVERHEAD;
 
   public CompactingMemStore(Configuration conf, CellComparator c,
-      HStore store, RegionServicesForStores regionServices) throws IOException {
+      HStore store, RegionServicesForStores regionServices,
+      HColumnDescriptor.MemoryCompaction compactionPolicy) throws IOException {
     super(conf, c);
     this.store = store;
     this.regionServices = regionServices;
     this.pipeline = new CompactionPipeline(getRegionServices());
-    this.compactor = new MemStoreCompactor(this);
+    this.compactor = new MemStoreCompactor(this, compactionPolicy);
     initInmemoryFlushSize(conf);
   }
 
@@ -416,8 +423,8 @@
   }
 
   @VisibleForTesting
-  void initiateType() {
-    compactor.initiateAction();
+  void initiateType(HColumnDescriptor.MemoryCompaction compactionType) {
+    compactor.initiateAction(compactionType);
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index b35c38f..c56abaa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -241,12 +241,22 @@ public class HStore implements Store {
     // to clone it?
     scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator);
     String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName());
-    if (family.isInMemoryCompaction()) {
-      className = CompactingMemStore.class.getName();
-      this.memstore = new CompactingMemStore(conf, this.comparator, this,
-          this.getHRegion().getRegionServicesForStores());
-    } else {
-      this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
+    HColumnDescriptor.MemoryCompaction inMemoryCompaction = family.getInMemoryCompaction();
+    if (inMemoryCompaction == null) {
+      inMemoryCompaction = HColumnDescriptor.MemoryCompaction.valueOf(
+          conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+              CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT));
+    }
+    switch (inMemoryCompaction) {
+    case BASIC:
+    case EAGER:
+      className = CompactingMemStore.class.getName();
+      this.memstore = new CompactingMemStore(conf, this.comparator, this,
+          this.getHRegion().getRegionServicesForStores(), inMemoryCompaction);
+      break;
+    case NONE:
+    default:
+      this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] {
           Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator });
     }
     LOG.info("Memstore class name is " + className);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index 0df3674..84f88f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -53,15 +54,6 @@
       + ClassSize.ATOMIC_BOOLEAN // isInterrupted (the internals)
       );
 
-  // Configuration options for MemStore compaction
-  static final String INDEX_COMPACTION_CONFIG = "index-compaction";
-  static final String DATA_COMPACTION_CONFIG = "data-compaction";
-
-  // The external setting of the compacting MemStore behaviour
-  // Compaction of the index without the data is the default
-  static final String COMPACTING_MEMSTORE_TYPE_KEY = "hbase.hregion.compacting.memstore.type";
-  static final String COMPACTING_MEMSTORE_TYPE_DEFAULT = INDEX_COMPACTION_CONFIG;
-
   // The upper bound for the number of segments we store in the pipeline prior to merging.
   // This constant is subject to further experimentation.
   private static final int THRESHOLD_PIPELINE_SEGMENTS = 1;
@@ -93,11 +85,12 @@
 
   private Action action = Action.FLATTEN;
 
-  public MemStoreCompactor(CompactingMemStore compactingMemStore) {
+  public MemStoreCompactor(CompactingMemStore compactingMemStore,
+      MemoryCompaction compactionPolicy) {
     this.compactingMemStore = compactingMemStore;
     this.compactionKVMax = compactingMemStore.getConfiguration()
        .getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT);
-    initiateAction();
+    initiateAction(compactionPolicy);
   }
 
  /**----------------------------------------------------------------------
@@ -277,17 +270,17 @@
   * Initiate the action according to user config, after its default is Action.MERGE
   */
  @VisibleForTesting
-  void initiateAction() {
-    String memStoreType = compactingMemStore.getConfiguration().get(COMPACTING_MEMSTORE_TYPE_KEY,
-        COMPACTING_MEMSTORE_TYPE_DEFAULT);
+  void initiateAction(MemoryCompaction compType) {
 
-    switch (memStoreType) {
-    case INDEX_COMPACTION_CONFIG: action = Action.MERGE;
+    switch (compType) {
+    case NONE: action = Action.NOOP;
+      break;
+    case BASIC: action = Action.MERGE;
       break;
-    case DATA_COMPACTION_CONFIG: action = Action.COMPACT;
+    case EAGER: action = Action.COMPACT;
       break;
     default:
-      throw new RuntimeException("Unknown memstore type " + memStoreType); // sanity check
+      throw new RuntimeException("Unknown memstore type " + compType); // sanity check
     }
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index c74c399..47fed8d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1881,9 +1881,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     for (byte[] family : families) {
       HColumnDescriptor hcd = new HColumnDescriptor(family);
       if(compactedMemStore != null && i < compactedMemStore.length) {
-        hcd.setInMemoryCompaction(true);
+        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.BASIC);
       } else {
-        hcd.setInMemoryCompaction(false);
+        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
       }
       i++;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index d50bf6b..5439bae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
 import org.apache.hadoop.hbase.util.*;
@@ -375,9 +376,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       if (opts.inMemoryCF) {
         family.setInMemory(true);
       }
-      if(opts.inMemoryCompaction) {
-        family.setInMemoryCompaction(true);
-      }
+      family.setInMemoryCompaction(opts.inMemoryCompaction);
       desc.addFamily(family);
       if (opts.replicas != DEFAULT_OPTS.replicas) {
         desc.setRegionReplication(opts.replicas);
@@ -636,7 +635,9 @@
     int columns = 1;
     int caching = 30;
     boolean addColumns = true;
-    boolean inMemoryCompaction = false;
+    HColumnDescriptor.MemoryCompaction inMemoryCompaction =
+        HColumnDescriptor.MemoryCompaction.valueOf(
+            CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT);
 
     public TestOptions() {}
 
@@ -981,11 +982,11 @@
       this.addColumns = addColumns;
     }
 
-    public void setInMemoryCompaction(boolean inMemoryCompaction) {
+    public void setInMemoryCompaction(HColumnDescriptor.MemoryCompaction inMemoryCompaction) {
       this.inMemoryCompaction = inMemoryCompaction;
     }
 
-    public boolean getInMemoryCompaction() {
+    public HColumnDescriptor.MemoryCompaction getInMemoryCompaction() {
       return this.inMemoryCompaction;
     }
   }
@@ -2139,7 +2140,8 @@
 
       final String inMemoryCompaction = "--inmemoryCompaction=";
       if (cmd.startsWith(inMemoryCompaction)) {
-        opts.inMemoryCompaction = Boolean.parseBoolean(cmd.substring(inMemoryCompaction.length()));
+        opts.inMemoryCompaction = HColumnDescriptor.MemoryCompaction.valueOf(
+            cmd.substring(inMemoryCompaction.length()));
         continue;
       }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
index 989192d..e6c7124 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -100,6 +101,8 @@ public class TestAcidGuarantees implements Tool {
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
         ConstantSizeRegionSplitPolicy.class.getName());
     conf.setInt("hfile.format.version", 3); // for mob tests
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     util = new HBaseTestingUtility(conf);
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index eeb4ebf..2df3c87 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
@@ -249,6 +250,8 @@ public class TestIOFencing {
     c.setLong("hbase.hstore.blockingStoreFiles", 1000);
     // Compact quickly after we tell it to!
     c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000);
+    c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     LOG.info("Starting mini cluster");
     TEST_UTIL.startMiniCluster(1);
     CompactionBlockerRegion compactingRegion = null;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
index e30d719..1eaf011 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java
@@ -35,11 +35,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -92,6 +94,9 @@ public class TestHFileArchiving {
     // prevent aggressive region split
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
         ConstantSizeRegionSplitPolicy.class.getName());
+    // no memory compaction
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @After
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
index 268bc14..a48aafd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.hbase.client;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -50,6 +52,8 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient {
   protected static void setupConf(Configuration conf) {
     TestSnapshotFromClient.setupConf(conf);
     conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
index 2d4b4c9..5863220 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -117,6 +118,8 @@ public class TestSnapshotCloneIndependence {
     // will even trigger races between creating the directory containing back references and
     // the back reference itself.
     conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @Before
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
index 7426437..94b2bc1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
@@ -37,6 +37,7 @@ import java.util.concurrent.Future;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -52,6 +53,7 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -62,6 +64,7 @@ import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -96,6 +99,12 @@ public class TestTableLockManager {
     TEST_UTIL.startMiniZKCluster(1);
   }
 
+  @Before
+  public void setUp() throws IOException {
+    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+  }
+
   @After
   public void tearDown() throws Exception {
     TEST_UTIL.shutdownMiniCluster();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
index d66ca85..511b3de 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
@@ -28,11 +28,13 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 
@@ -55,6 +57,8 @@ public class TestMasterProcedureSchedulerConcurrency {
   @Before
   public void setUp() throws IOException {
     conf = HBaseConfiguration.create();
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager());
     queue.start();
   }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index d1bbd50..b0b63a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -82,7 +82,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void setUp() throws Exception {
     compactingSetUp();
     this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR,
-        store, regionServicesForStores);
+        store, regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER);
   }
 
   protected void compactingSetUp() throws Exception {
@@ -135,7 +135,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
 
     // use case 3: first in snapshot second in kvset
     this.memstore = new CompactingMemStore(HBaseConfiguration.create(),
-        CellComparator.COMPARATOR, store, regionServicesForStores);
+        CellComparator.COMPARATOR, store, regionServicesForStores,
+        HColumnDescriptor.MemoryCompaction.EAGER);
     this.memstore.add(kv1.clone(), null);
     // As compaction is starting in the background the repetition
     // of the k1 might be removed BUT the scanners created earlier
@@ -468,8 +469,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
       throws IOException {
 
     // set memstore to do data compaction and not to use the speculative scan
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
 
     byte[] row = Bytes.toBytes("testrow");
     byte[] fam = Bytes.toBytes("testfamily");
@@ -549,8 +552,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction1Bucket() throws IOException {
 
     // set memstore to do data compaction and not to use the speculative scan
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
 
     String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4
 
@@ -584,8 +589,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction2Buckets() throws IOException {
 
     // set memstore to do data compaction and not to use the speculative scan
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
 
     String[] keys1 = { "A", "A", "B", "C" };
     String[] keys2 = { "A", "B", "D" };
@@ -637,8 +644,10 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
   public void testCompaction3Buckets() throws IOException {
 
     // set memstore to do data compaction and not to use the speculative scan
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
 
     String[] keys1 = { "A", "A", "B", "C" };
     String[] keys2 = { "A", "B", "D" };
     String[] keys3 = { "D", "B", "B" };
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
index c72cae3..56ae72e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java
@@ -62,12 +62,13 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     compactingSetUp();
     Configuration conf = HBaseConfiguration.create();
 
-    // set memstore to do data compaction and not to use the speculative scan
-    conf.set("hbase.hregion.compacting.memstore.type", "data-compaction");
+    // set memstore to do data compaction
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
 
     this.memstore =
         new CompactingMemStore(conf, CellComparator.COMPARATOR, store,
-            regionServicesForStores);
+            regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER);
   }
 
   //////////////////////////////////////////////////////////////////////////////
@@ -266,8 +267,10 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore
     String[] keys2 = { "A", "B", "D", "G", "I", "J"};
     String[] keys3 = { "D", "B", "B", "E" };
 
-    memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "index-compaction");
-    ((CompactingMemStore)memstore).initiateType();
+    HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.BASIC;
+    memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(compactionType));
+    ((CompactingMemStore)memstore).initiateType(compactionType);
 
     addRowsByKeys(memstore, keys1);
     ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline should not compact
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
index 3ef89ad..381a73f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.Delete;
@@ -94,6 +95,8 @@ public class TestMajorCompaction {
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
     conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100);
     compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
 
     secondRowBytes = START_KEY_BYTES.clone();
     // Increment the least significant character so we get to next row.
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index 5157868..be41e54 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -82,6 +83,12 @@ public class TestPerColumnFamilyFlush {
 
   public static final byte[] FAMILY3 = FAMILIES[2];
 
+  @Before
+  public void setUp() throws IOException {
+    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
+  }
+
   private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TABLENAME);
     for (byte[] family : FAMILIES) {
@@ -128,7 +135,9 @@ public class TestPerColumnFamilyFlush {
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 200 * 1024);
     conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllLargeStoresPolicy.class.getName());
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
-      40 * 1024);
+        40 * 1024);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     // Intialize the region
     Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
index fc47d7e..e619a9a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
@@ -74,6 +74,8 @@ public class TestRecoveredEdits {
     Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
     // Set it so we flush every 1M or so. Thats a lot.
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
     // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
     final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 35159b6..133c53b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -71,7 +71,12 @@ public class TestWalAndCompactingMemStoreFlush {
     for (byte[] family : FAMILIES) {
       HColumnDescriptor hcd = new HColumnDescriptor(family);
       // even column families are going to have compacted memstore
-      if(i%2 == 0) hcd.setInMemoryCompaction(true);
+      if (i % 2 == 0) {
+        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf(
+            conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY)));
+      } else {
+        hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
+      }
       htd.addFamily(hcd);
       i++;
     }
@@ -123,7 +128,7 @@ public class TestWalAndCompactingMemStoreFlush {
   }
 
   @Test(timeout = 180000)
-  public void testSelectiveFlushWithDataCompaction() throws IOException {
+  public void testSelectiveFlushWithEager() throws IOException {
 
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
@@ -133,10 +138,11 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
         75 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25);
     // set memstore to do data compaction
-    conf.set("hbase.hregion.compacting.memstore.type", "data-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
 
     // Intialize the region
-    Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf);
+    Region region = initHRegion("testSelectiveFlushWithEager", conf);
 
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
@@ -368,7 +374,8 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
         75 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to index-compaction
-    conf.set("hbase.hregion.compacting.memstore.type", "index-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
 
     // Initialize the region
     Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf);
@@ -621,7 +628,8 @@ public class TestWalAndCompactingMemStoreFlush {
         1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
-    conf.set("hbase.hregion.compacting.memstore.type", "data-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
 
     // Intialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@@ -751,7 +759,8 @@ public class TestWalAndCompactingMemStoreFlush {
         200 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
-    conf.set("hbase.hregion.compacting.memstore.type", "index-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
 
     // Intialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
@@ -874,7 +883,8 @@ public class TestWalAndCompactingMemStoreFlush {
         200 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
-    conf.set("hbase.hregion.compacting.memstore.type", "index-compaction");
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC));
 
     // Successfully initialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
index 1aa077d..1923b4a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -92,27 +93,30 @@ public abstract class AbstractTestLogRolling {
 
     /**** configuration for testLogRolling ****/
     // Force a region split after every 768KB
-    TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 768L * 1024L);
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setLong(HConstants.HREGION_MAX_FILESIZE, 768L * 1024L);
 
     // We roll the log after every 32 writes
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.maxlogentries", 32);
+    conf.setInt("hbase.regionserver.maxlogentries", 32);
 
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.logroll.errors.tolerated", 2);
-    TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 10 * 1000);
+    conf.setInt("hbase.regionserver.logroll.errors.tolerated", 2);
+    conf.setInt("hbase.rpc.timeout", 10 * 1000);
 
     // For less frequently updated regions flush after every 2 flushes
-    TEST_UTIL.getConfiguration().setInt("hbase.hregion.memstore.optionalflushcount", 2);
+    conf.setInt("hbase.hregion.memstore.optionalflushcount", 2);
 
     // We flush the cache after every 8192 bytes
-    TEST_UTIL.getConfiguration().setInt(
-        HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192);
+    conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 8192);
 
     // Increase the amount of time between client retries
-    TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 10 * 1000);
+    conf.setLong("hbase.client.pause", 10 * 1000);
 
     // Reduce thread wake frequency so that other threads can get
     // a chance to run.
-    TEST_UTIL.getConfiguration().setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
+    conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000);
+
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @Before
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index ca2ec85..84bdc69 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
@@ -138,6 +139,8 @@ public abstract class AbstractTestWALReplay {
     Configuration conf = TEST_UTIL.getConfiguration();
     // The below config supported by 0.20-append and CDH3b2
     conf.setInt("dfs.client.block.recovery.retries", 2);
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
     TEST_UTIL.startMiniCluster(3);
     Path hbaseRootDir =
         TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase"));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
index fabf6d2..a412d27 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncLogRolling.java
@@ -22,8 +22,10 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
 import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index e4d4c5b..db2c241 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -74,14 +75,15 @@ public class TestLogRolling extends AbstractTestLogRolling {
     /**** configuration for testLogRollOnDatanodeDeath ****/
     // lower the namenode & datanode heartbeat so the namenode
     // quickly detects datanode failures
-    TEST_UTIL.getConfiguration().setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
-    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
+    conf.setInt("dfs.heartbeat.interval", 1);
     // the namenode might still try to choose the recently-dead datanode
     // for a pipeline, so try to a new pipeline multiple times
-    TEST_UTIL.getConfiguration().setInt("dfs.client.block.write.retries", 30);
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
-    TEST_UTIL.getConfiguration().setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3);
-    TEST_UTIL.getConfiguration().set(WALFactory.WAL_PROVIDER, "filesystem");
+    conf.setInt("dfs.client.block.write.retries", 30);
+    conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
+    conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 3);
+    conf.set(WALFactory.WAL_PROVIDER, "filesystem");
     AbstractTestLogRolling.setUpBeforeClass();
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
index 629a3a7..92071d1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
@@ -45,6 +46,7 @@ import org.apache.hadoop.hbase.client.SnapshotType;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -103,7 +105,9 @@ public class TestFlushSnapshotFromClient {
     // Enable snapshot
     conf.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
     conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
-      ConstantSizeRegionSplitPolicy.class.getName());
+        ConstantSizeRegionSplitPolicy.class.getName());
+    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
   }
 
   @Before
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 67dde53..c4061c1 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -816,7 +816,7 @@ module Hbase
           family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE)
          family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
           family.setInMemoryCompaction(
-            JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
+            org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
           family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
           family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
          family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
-- 
2.9.3 (Apple Git-75)
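
Editor's note, not part of the change: the patch introduces two ways to pick a memory compaction policy. A minimal client-side sketch of the per-column-family route, using the new HColumnDescriptor.MemoryCompaction enum and setInMemoryCompaction() from the HColumnDescriptor hunk; the table name, family name, and the "admin" handle are illustrative assumptions, not part of the patch:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // "admin" is assumed to be an already-open Admin handle; "t1"/"cf" are made-up names.
    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    HColumnDescriptor hcd = new HColumnDescriptor("cf");
    // A CF-level policy takes precedence over the cluster-wide default
    // (see the HStore hunk: family.getInMemoryCompaction() is consulted first).
    hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.EAGER);
    htd.addFamily(hcd);
    admin.createTable(htd);

The admin.rb hunk wires the same IN_MEMORY_COMPACTION attribute into the shell, so passing it in a create/alter clause with an enum name as the string value (for example 'BASIC') should reach the same setter, assuming the shell's usual attribute syntax.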
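The cluster-wide route sets the new key on the server configuration; this mirrors the conf.set(...) calls added throughout the tests above. A sketch, assuming a freshly created Configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.regionserver.CompactingMemStore;

    Configuration conf = HBaseConfiguration.create();
    // COMPACTING_MEMSTORE_TYPE_KEY is "hbase.hregion.compacting.memstore.type";
    // COMPACTING_MEMSTORE_TYPE_DEFAULT makes BASIC the shipped default.
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));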
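For reference, the resolution order the HStore hunk implements, condensed into a few lines: the CF setting wins when present, a null getInMemoryCompaction() falls back to the site key, and the key itself defaults to BASIC ("family" and "conf" stand for the in-scope HColumnDescriptor and Configuration):

    HColumnDescriptor.MemoryCompaction policy = family.getInMemoryCompaction();
    if (policy == null) { // no CF-level setting: fall back to the site configuration
      policy = HColumnDescriptor.MemoryCompaction.valueOf(
          conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
              CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT));
    }
    // BASIC and EAGER yield a CompactingMemStore; NONE keeps the reflective default path.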