From 7a942985d8169de5ee4f59b2049de513b01de0d1 Mon Sep 17 00:00:00 2001 From: eshcar Date: Mon, 12 Dec 2016 10:53:21 +0200 Subject: [PATCH] HBASE-17294: External configuration for memory compaction --- .../org/apache/hadoop/hbase/HColumnDescriptor.java | 31 +++++++++++----------- .../hbase/regionserver/CompactingMemStore.java | 11 ++++++-- .../apache/hadoop/hbase/regionserver/HStore.java | 22 ++++++++++----- .../hbase/regionserver/MemStoreCompactor.java | 31 +++++++++++----------- .../apache/hadoop/hbase/HBaseTestingUtility.java | 4 +-- .../apache/hadoop/hbase/PerformanceEvaluation.java | 2 +- .../apache/hadoop/hbase/TestAcidGuarantees.java | 3 +++ .../org/apache/hadoop/hbase/TestIOFencing.java | 3 +++ .../hadoop/hbase/backup/TestHFileArchiving.java | 5 ++++ .../TestMasterProcedureSchedulerConcurrency.java | 4 +++ .../hbase/regionserver/TestCompactingMemStore.java | 17 +++++++----- .../TestCompactingToCellArrayMapMemStore.java | 10 ++++--- .../hbase/regionserver/TestMajorCompaction.java | 3 +++ .../hbase/regionserver/TestRecoveredEdits.java | 2 ++ .../TestWalAndCompactingMemStoreFlush.java | 26 ++++++++++++------ .../regionserver/wal/AbstractTestWALReplay.java | 3 +++ hbase-shell/src/main/ruby/hbase/admin.rb | 2 +- 17 files changed, 117 insertions(+), 62 deletions(-) diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 3d7b145..a0d9379 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -66,6 +66,14 @@ public class HColumnDescriptor implements Comparable { public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; + @InterfaceAudience.Public + @InterfaceStability.Evolving + public enum MemoryCompaction { + NONE, + BASIC, + EAGER + } + // These constants are used as FileInfo keys public static final String COMPRESSION = 
"COMPRESSION"; public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; @@ -174,11 +182,6 @@ public class HColumnDescriptor implements Comparable { public static final boolean DEFAULT_IN_MEMORY = false; /** - * Default setting for whether to set the memstore of this column family as compacting or not. - */ - public static final boolean DEFAULT_IN_MEMORY_COMPACTION = false; - - /** * Default setting for preventing deleted from being collected immediately. */ public static final KeepDeletedCells DEFAULT_KEEP_DELETED = KeepDeletedCells.FALSE; @@ -263,7 +266,6 @@ public class HColumnDescriptor implements Comparable { DEFAULT_VALUES.put(TTL, String.valueOf(DEFAULT_TTL)); DEFAULT_VALUES.put(BLOCKSIZE, String.valueOf(DEFAULT_BLOCKSIZE)); DEFAULT_VALUES.put(HConstants.IN_MEMORY, String.valueOf(DEFAULT_IN_MEMORY)); - DEFAULT_VALUES.put(IN_MEMORY_COMPACTION, String.valueOf(DEFAULT_IN_MEMORY_COMPACTION)); DEFAULT_VALUES.put(BLOCKCACHE, String.valueOf(DEFAULT_BLOCKCACHE)); DEFAULT_VALUES.put(KEEP_DELETED_CELLS, String.valueOf(DEFAULT_KEEP_DELETED)); DEFAULT_VALUES.put(DATA_BLOCK_ENCODING, String.valueOf(DEFAULT_DATA_BLOCK_ENCODING)); @@ -329,7 +331,6 @@ public class HColumnDescriptor implements Comparable { setMinVersions(DEFAULT_MIN_VERSIONS); setKeepDeletedCells(DEFAULT_KEEP_DELETED); setInMemory(DEFAULT_IN_MEMORY); - setInMemoryCompaction(DEFAULT_IN_MEMORY_COMPACTION); setBlockCacheEnabled(DEFAULT_BLOCKCACHE); setTimeToLive(DEFAULT_TTL); setCompressionType(Compression.Algorithm.valueOf(DEFAULT_COMPRESSION.toUpperCase(Locale.ROOT))); @@ -688,24 +689,22 @@ public class HColumnDescriptor implements Comparable { } /** - * @return True if we prefer to keep the in-memory data compacted - * for this column family + * @return in-memory compaction policy if set for the cf */ - public boolean isInMemoryCompaction() { + public MemoryCompaction getInMemoryCompaction() { String value = getValue(IN_MEMORY_COMPACTION); if (value != null) { - return 
Boolean.parseBoolean(value); + return MemoryCompaction.valueOf(value); } - return DEFAULT_IN_MEMORY_COMPACTION; + return null; } /** - * @param inMemoryCompaction True if we prefer to keep the in-memory data compacted - * for this column family + * @param inMemoryCompaction the preferred in-memory compaction policy * @return this (for chained invocation) */ - public HColumnDescriptor setInMemoryCompaction(boolean inMemoryCompaction) { - return setValue(IN_MEMORY_COMPACTION, Boolean.toString(inMemoryCompaction)); + public HColumnDescriptor setInMemoryCompaction(MemoryCompaction inMemoryCompaction) { + return setValue(IN_MEMORY_COMPACTION, inMemoryCompaction.toString()); } public KeepDeletedCells getKeepDeletedCells() { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index a7eb19e..d8175a3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -51,6 +52,11 @@ import org.apache.hadoop.hbase.wal.WAL; @InterfaceAudience.Private public class CompactingMemStore extends AbstractMemStore { + // The external setting of the compacting MemStore behaviour + public static final String COMPACTING_MEMSTORE_TYPE_KEY = + "hbase.hregion.compacting.memstore.type"; + public static final String COMPACTING_MEMSTORE_TYPE_DEFAULT = + String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC); // Default fraction of 
in-memory-flush size w.r.t. flush-to-disk size public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY = "hbase.memstore.inmemoryflush.threshold.factor"; @@ -75,12 +81,13 @@ public class CompactingMemStore extends AbstractMemStore { + CompactionPipeline.DEEP_OVERHEAD + MemStoreCompactor.DEEP_OVERHEAD; public CompactingMemStore(Configuration conf, CellComparator c, - HStore store, RegionServicesForStores regionServices) throws IOException { + HStore store, RegionServicesForStores regionServices, + HColumnDescriptor.MemoryCompaction compactionPolicy) throws IOException { super(conf, c); this.store = store; this.regionServices = regionServices; this.pipeline = new CompactionPipeline(getRegionServices()); - this.compactor = new MemStoreCompactor(this); + this.compactor = new MemStoreCompactor(this, compactionPolicy); initInmemoryFlushSize(conf); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index b35c38f..c56abaa 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -241,12 +241,22 @@ public class HStore implements Store { // to clone it? 
scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName()); - if (family.isInMemoryCompaction()) { - className = CompactingMemStore.class.getName(); - this.memstore = new CompactingMemStore(conf, this.comparator, this, - this.getHRegion().getRegionServicesForStores()); - } else { - this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { + HColumnDescriptor.MemoryCompaction inMemoryCompaction = family.getInMemoryCompaction(); + if(inMemoryCompaction == null) { + inMemoryCompaction = HColumnDescriptor.MemoryCompaction.valueOf(conf.get + (CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT)); + } + switch (inMemoryCompaction) { + case BASIC : + case EAGER : + className = CompactingMemStore.class.getName(); + this.memstore = new CompactingMemStore(conf, this.comparator, this, + this.getHRegion().getRegionServicesForStores(), inMemoryCompaction); + break; + case NONE : + default: + this.memstore = ReflectionUtils.instantiateWithCustomCtor(className, new Class[] { Configuration.class, CellComparator.class }, new Object[] { conf, this.comparator }); } LOG.info("Memstore class name is " + className); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java index 0df3674..db64e3f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; 
import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; @@ -29,6 +30,9 @@ import org.apache.hadoop.hbase.util.ClassSize; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; +import static org.apache.hadoop.hbase.HColumnDescriptor.*; +import static org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction.*; + /** * The ongoing MemStore Compaction manager, dispatches a solo running compaction and interrupts * the compaction if requested. The compaction is interrupted and stopped by CompactingMemStore, @@ -53,15 +57,6 @@ public class MemStoreCompactor { + ClassSize.ATOMIC_BOOLEAN // isInterrupted (the internals) ); - // Configuration options for MemStore compaction - static final String INDEX_COMPACTION_CONFIG = "index-compaction"; - static final String DATA_COMPACTION_CONFIG = "data-compaction"; - - // The external setting of the compacting MemStore behaviour - // Compaction of the index without the data is the default - static final String COMPACTING_MEMSTORE_TYPE_KEY = "hbase.hregion.compacting.memstore.type"; - static final String COMPACTING_MEMSTORE_TYPE_DEFAULT = INDEX_COMPACTION_CONFIG; - // The upper bound for the number of segments we store in the pipeline prior to merging. // This constant is subject to further experimentation. 
private static final int THRESHOLD_PIPELINE_SEGMENTS = 1; @@ -93,7 +88,8 @@ public class MemStoreCompactor { private Action action = Action.FLATTEN; - public MemStoreCompactor(CompactingMemStore compactingMemStore) { + public MemStoreCompactor(CompactingMemStore compactingMemStore, + MemoryCompaction compactionPolicy) { this.compactingMemStore = compactingMemStore; this.compactionKVMax = compactingMemStore.getConfiguration() .getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); @@ -278,16 +274,19 @@ public class MemStoreCompactor { */ @VisibleForTesting void initiateAction() { - String memStoreType = compactingMemStore.getConfiguration().get(COMPACTING_MEMSTORE_TYPE_KEY, - COMPACTING_MEMSTORE_TYPE_DEFAULT); + String compType = compactingMemStore.getConfiguration().get( + CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(BASIC)); - switch (memStoreType) { - case INDEX_COMPACTION_CONFIG: action = Action.MERGE; + switch (valueOf(compType)){ + case NONE: action = Action.NOOP; + break; + case BASIC: action = Action.MERGE; break; - case DATA_COMPACTION_CONFIG: action = Action.COMPACT; + case EAGER: action = Action.COMPACT; break; default: - throw new RuntimeException("Unknown memstore type " + memStoreType); // sanity check + throw new RuntimeException("Unknown memstore type " + compType); // sanity check } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index c74c399..47fed8d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1881,9 +1881,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); if(compactedMemStore != null && i < compactedMemStore.length) { - 
hcd.setInMemoryCompaction(true); + hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.BASIC); } else { - hcd.setInMemoryCompaction(false); + hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE); } i++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index d50bf6b..fc25cc1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -376,7 +376,7 @@ public class PerformanceEvaluation extends Configured implements Tool { family.setInMemory(true); } if(opts.inMemoryCompaction) { - family.setInMemoryCompaction(true); + family.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.BASIC); } desc.addFamily(family); if (opts.replicas != DEFAULT_OPTS.replicas) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java index 989192d..e6c7124 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.testclassification.FlakeyTests; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -100,6 +101,8 @@ public class TestAcidGuarantees implements Tool { conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); conf.setInt("hfile.format.version", 3); // for mob tests + 
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); util = new HBaseTestingUtility(conf); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index eeb4ebf..2df3c87 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; @@ -249,6 +250,8 @@ public class TestIOFencing { c.setLong("hbase.hstore.blockingStoreFiles", 1000); // Compact quickly after we tell it to! 
c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000); + c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); LOG.info("Starting mini cluster"); TEST_UTIL.startMiniCluster(1); CompactionBlockerRegion compactingRegion = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java index e30d719..1eaf011 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java @@ -35,11 +35,13 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.ChoreService; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; +import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.HRegionServer; @@ -92,6 +94,9 @@ public class TestHFileArchiving { // prevent aggressive region split conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); + // no memory compaction + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java 
index d66ca85..511b3de 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java @@ -28,11 +28,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -55,6 +57,8 @@ public class TestMasterProcedureSchedulerConcurrency { @Before public void setUp() throws IOException { conf = HBaseConfiguration.create(); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager()); queue.start(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index d1bbd50..7d386c6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -82,7 +82,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void setUp() throws Exception { compactingSetUp(); this.memstore = new 
CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, - store, regionServicesForStores); + store, regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER); } protected void compactingSetUp() throws Exception { @@ -135,7 +135,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore { // use case 3: first in snapshot second in kvset this.memstore = new CompactingMemStore(HBaseConfiguration.create(), - CellComparator.COMPARATOR, store, regionServicesForStores); + CellComparator.COMPARATOR, store, regionServicesForStores, + HColumnDescriptor.MemoryCompaction.EAGER); this.memstore.add(kv1.clone(), null); // As compaction is starting in the background the repetition // of the k1 might be removed BUT the scanners created earlier @@ -468,7 +469,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore { throws IOException { // set memstore to do data compaction and not to use the speculative scan - memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction"); + memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); ((CompactingMemStore)memstore).initiateType(); byte[] row = Bytes.toBytes("testrow"); @@ -549,7 +551,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void testCompaction1Bucket() throws IOException { // set memstore to do data compaction and not to use the speculative scan - memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction"); + memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); ((CompactingMemStore)memstore).initiateType(); String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4 @@ -584,7 +587,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void testCompaction2Buckets() throws IOException { // set 
memstore to do data compaction and not to use the speculative scan - memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction"); + memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); ((CompactingMemStore)memstore).initiateType(); String[] keys1 = { "A", "A", "B", "C" }; String[] keys2 = { "A", "B", "D" }; @@ -637,7 +641,8 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void testCompaction3Buckets() throws IOException { // set memstore to do data compaction and not to use the speculative scan - memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "data-compaction"); + memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); ((CompactingMemStore)memstore).initiateType(); String[] keys1 = { "A", "A", "B", "C" }; String[] keys2 = { "A", "B", "D" }; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java index c72cae3..8384a19 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java @@ -62,12 +62,13 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore compactingSetUp(); Configuration conf = HBaseConfiguration.create(); - // set memstore to do data compaction and not to use the speculative scan - conf.set("hbase.hregion.compacting.memstore.type", "data-compaction"); + // set memstore to do data compaction + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); this.memstore = new 
CompactingMemStore(conf, CellComparator.COMPARATOR, store, - regionServicesForStores); + regionServicesForStores, HColumnDescriptor.MemoryCompaction.BASIC); } ////////////////////////////////////////////////////////////////////////////// @@ -266,7 +267,8 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore String[] keys2 = { "A", "B", "D", "G", "I", "J"}; String[] keys3 = { "D", "B", "B", "E" }; - memstore.getConfiguration().set("hbase.hregion.compacting.memstore.type", "index-compaction"); + memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC)); ((CompactingMemStore)memstore).initiateType(); addRowsByKeys(memstore, keys1); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 3ef89ad..381a73f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.client.Delete; @@ -94,6 +95,8 @@ public class TestMajorCompaction { conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024); conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100); compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); secondRowBytes = START_KEY_BYTES.clone(); // Increment the least significant 
character so we get to next row. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java index fc47d7e..e619a9a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java @@ -74,6 +74,8 @@ public class TestRecoveredEdits { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); // Set it so we flush every 1M or so. Thats a lot. conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay. final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 35159b6..133c53b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -71,7 +71,12 @@ public class TestWalAndCompactingMemStoreFlush { for (byte[] family : FAMILIES) { HColumnDescriptor hcd = new HColumnDescriptor(family); // even column families are going to have compacted memstore - if(i%2 == 0) hcd.setInMemoryCompaction(true); + if(i%2 == 0) { + hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf( + conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY))); + } else { + hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE); 
+ } htd.addFamily(hcd); i++; } @@ -123,7 +128,7 @@ public class TestWalAndCompactingMemStoreFlush { } @Test(timeout = 180000) - public void testSelectiveFlushWithDataCompaction() throws IOException { + public void testSelectiveFlushWithEager() throws IOException { // Set up the configuration Configuration conf = HBaseConfiguration.create(); @@ -133,10 +138,11 @@ public class TestWalAndCompactingMemStoreFlush { conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25); // set memstore to do data compaction - conf.set("hbase.hregion.compacting.memstore.type", "data-compaction"); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); // Intialize the region - Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf); + Region region = initHRegion("testSelectiveFlushWithEager", conf); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { @@ -368,7 +374,8 @@ public class TestWalAndCompactingMemStoreFlush { conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to index-compaction - conf.set("hbase.hregion.compacting.memstore.type", "index-compaction"); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC)); // Initialize the region Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf); @@ -621,7 +628,8 @@ public class TestWalAndCompactingMemStoreFlush { 1024); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to do data compaction and not to use the speculative scan - conf.set("hbase.hregion.compacting.memstore.type", "data-compaction"); + 
conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); // Intialize the HRegion HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); @@ -751,7 +759,8 @@ public class TestWalAndCompactingMemStoreFlush { 200 * 1024); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to do data compaction and not to use the speculative scan - conf.set("hbase.hregion.compacting.memstore.type", "index-compaction"); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC)); // Intialize the HRegion HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); @@ -874,7 +883,8 @@ public class TestWalAndCompactingMemStoreFlush { 200 * 1024); conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to do data compaction and not to use the speculative scan - conf.set("hbase.hregion.compacting.memstore.type", "index-compaction"); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC)); // Successfully initialize the HRegion HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index ca2ec85..84bdc69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.master.HMaster; import org.apache.hadoop.hbase.monitoring.MonitoredTask; +import 
org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode; import org.apache.hadoop.hbase.regionserver.DefaultStoreEngine; import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher; @@ -138,6 +139,8 @@ public abstract class AbstractTestWALReplay { Configuration conf = TEST_UTIL.getConfiguration(); // The below config supported by 0.20-append and CDH3b2 conf.setInt("dfs.client.block.recovery.retries", 2); + conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); TEST_UTIL.startMiniCluster(3); Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase")); diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 67dde53..c4061c1 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -816,7 +816,7 @@ module Hbase family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE) family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY) family.setInMemoryCompaction( - JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION) + org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION) family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL) 
family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING) family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE) -- 2.9.3 (Apple Git-75)