From 1e43c6d798e88cb2ad01e6798266dd0422605938 Mon Sep 17 00:00:00 2001 From: anastas Date: Sun, 22 Jan 2017 15:44:41 +0200 Subject: [PATCH] My squashed commits --- .../org/apache/hadoop/hbase/HColumnDescriptor.java | 32 ++------------- .../hadoop/hbase/MemoryCompactionPolicy.java | 48 ++++++++++++++++++++++ .../hbase/regionserver/CompactingMemStore.java | 7 ++-- .../apache/hadoop/hbase/regionserver/HStore.java | 15 ++----- .../hbase/regionserver/MemStoreCompactor.java | 6 +-- .../apache/hadoop/hbase/HBaseTestingUtility.java | 4 +- .../apache/hadoop/hbase/PerformanceEvaluation.java | 8 ++-- .../apache/hadoop/hbase/TestAcidGuarantees.java | 2 +- .../org/apache/hadoop/hbase/TestIOFencing.java | 2 +- .../hadoop/hbase/backup/TestHFileArchiving.java | 9 +--- .../client/TestAsyncTableGetMultiThreaded.java | 10 ++--- ...ncTableGetMultiThreadedWithBasicCompaction.java | 3 +- ...ncTableGetMultiThreadedWithEagerCompaction.java | 3 +- .../hbase/client/TestMobSnapshotFromClient.java | 3 +- .../client/TestSnapshotCloneIndependence.java | 10 +---- .../hbase/client/TestSnapshotFromClient.java | 9 +--- .../hadoop/hbase/master/TestTableLockManager.java | 16 +------- .../TestMasterProcedureSchedulerConcurrency.java | 3 +- .../hbase/regionserver/TestCompactingMemStore.java | 22 ++++------ .../TestCompactingToCellArrayMapMemStore.java | 6 +-- .../hadoop/hbase/regionserver/TestCompaction.java | 10 +---- .../hbase/regionserver/TestMajorCompaction.java | 10 +---- .../regionserver/TestPerColumnFamilyFlush.java | 15 ++----- .../hbase/regionserver/TestRecoveredEdits.java | 13 +----- .../TestWalAndCompactingMemStoreFlush.java | 22 ++++------ .../regionserver/wal/AbstractTestLogRolling.java | 11 +---- .../regionserver/wal/AbstractTestWALReplay.java | 16 +------- .../snapshot/TestFlushSnapshotFromClient.java | 10 +---- hbase-shell/src/main/ruby/hbase/admin.rb | 2 +- 29 files changed, 123 insertions(+), 204 deletions(-) create mode 100644 hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index 028ab76..0d557aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -66,32 +66,6 @@ public class HColumnDescriptor implements Comparable { public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; - /** - * Enum describing all possible memory compaction policies - */ - @InterfaceAudience.Public - @InterfaceStability.Evolving - public enum MemoryCompaction { - /** - * No memory compaction, when size threshold is exceeded data is flushed to disk - */ - NONE, - /** - * Basic policy applies optimizations which modify the index to a more compacted representation. - * This is beneficial in all access patterns. The smaller the cells are the greater the - * benefit of this policy. - * This is the default policy. - */ - BASIC, - /** - * In addition to compacting the index representation as the basic policy, eager policy - * eliminates duplication while the data is still in memory (much like the - * on-disk compaction does after the data is flushed to disk). This policy is most useful for - * applications with high data churn or small working sets. 
- */ - EAGER - } - // These constants are used as FileInfo keys public static final String COMPRESSION = "COMPRESSION"; public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; @@ -712,10 +686,10 @@ public class HColumnDescriptor implements Comparable { * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for * for this column family */ - public MemoryCompaction getInMemoryCompaction() { + public MemoryCompactionPolicy getInMemoryCompaction() { String value = getValue(IN_MEMORY_COMPACTION); if (value != null) { - return MemoryCompaction.valueOf(value); + return MemoryCompactionPolicy.valueOf(value); } return null; } @@ -725,7 +699,7 @@ public class HColumnDescriptor implements Comparable { * for this column family * @return this (for chained invocation) */ - public HColumnDescriptor setInMemoryCompaction(MemoryCompaction inMemoryCompaction) { + public HColumnDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { return setValue(IN_MEMORY_COMPACTION, inMemoryCompaction.toString()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java new file mode 100644 index 0000000..0153f7d --- /dev/null +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java @@ -0,0 +1,48 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Enum describing all possible memory compaction policies + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public enum MemoryCompactionPolicy { + /** + * No memory compaction, when size threshold is exceeded data is flushed to disk + */ + NONE, + /** + * Basic policy applies optimizations which modify the index to a more compacted representation. + * This is beneficial in all access patterns. The smaller the cells are the greater the + * benefit of this policy. + * This is the default policy. + */ + BASIC, + /** + * In addition to compacting the index representation as the basic policy, eager policy + * eliminates duplication while the data is still in memory (much like the + * on-disk compaction does after the data is flushed to disk). This policy is most useful for + * applications with high data churn or small working sets. 
+ */ + EAGER +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index ed7d274..0f45c9c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -56,7 +57,7 @@ public class CompactingMemStore extends AbstractMemStore { public static final String COMPACTING_MEMSTORE_TYPE_KEY = "hbase.hregion.compacting.memstore.type"; public static final String COMPACTING_MEMSTORE_TYPE_DEFAULT = - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE); + String.valueOf(MemoryCompactionPolicy.NONE); // Default fraction of in-memory-flush size w.r.t. flush-to-disk size public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY = "hbase.memstore.inmemoryflush.threshold.factor"; @@ -83,7 +84,7 @@ public class CompactingMemStore extends AbstractMemStore { public CompactingMemStore(Configuration conf, CellComparator c, HStore store, RegionServicesForStores regionServices, - HColumnDescriptor.MemoryCompaction compactionPolicy) throws IOException { + MemoryCompactionPolicy compactionPolicy) throws IOException { super(conf, c); this.store = store; this.regionServices = regionServices; @@ -482,7 +483,7 @@ public class CompactingMemStore extends AbstractMemStore { } @VisibleForTesting - void initiateType(HColumnDescriptor.MemoryCompaction compactionType) { + void initiateType(MemoryCompactionPolicy compactionType) { compactor.initiateAction(compactionType); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 2a93b70..0750223 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -53,14 +53,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.CompoundConfiguration; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.backup.FailedArchiveException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; @@ -259,10 +252,10 @@ public class HStore implements Store { // to clone it? 
scanInfo = new ScanInfo(conf, family, ttl, timeToPurgeDeletes, this.comparator); String className = conf.get(MEMSTORE_CLASS_NAME, DefaultMemStore.class.getName()); - HColumnDescriptor.MemoryCompaction inMemoryCompaction = family.getInMemoryCompaction(); + MemoryCompactionPolicy inMemoryCompaction = family.getInMemoryCompaction(); if(inMemoryCompaction == null) { - inMemoryCompaction = HColumnDescriptor.MemoryCompaction.valueOf(conf.get - (CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, + inMemoryCompaction = MemoryCompactionPolicy.valueOf( + conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT)); } switch (inMemoryCompaction) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java index 2174d89..c435098 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java @@ -21,8 +21,8 @@ package org.apache.hadoop.hbase.regionserver; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; @@ -86,7 +86,7 @@ public class MemStoreCompactor { private Action action = Action.FLATTEN; public MemStoreCompactor(CompactingMemStore compactingMemStore, - MemoryCompaction compactionPolicy) { + MemoryCompactionPolicy compactionPolicy) { this.compactingMemStore = compactingMemStore; this.compactionKVMax = compactingMemStore.getConfiguration() .getInt(HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); @@ -270,7 +270,7 @@ public class MemStoreCompactor { * Initiate the action according to user config, after its default is Action.MERGE */ @VisibleForTesting - void initiateAction(MemoryCompaction compType) { + void initiateAction(MemoryCompactionPolicy compType) { switch (compType){ case NONE: action = Action.NOOP; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 1169bc0..6529406 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1881,9 +1881,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { for (byte[] family : families) { HColumnDescriptor hcd = new HColumnDescriptor(family); if(compactedMemStore != null && i < compactedMemStore.length) { - hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.BASIC); + hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC); } else { - hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE); + hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE); } i++; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index d1fb7f8..5391884 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -631,8 +631,8 @@ public class PerformanceEvaluation extends Configured implements Tool { int columns = 1; int caching = 30; boolean addColumns = true; - HColumnDescriptor.MemoryCompaction inMemoryCompaction = - HColumnDescriptor.MemoryCompaction.valueOf( + MemoryCompactionPolicy inMemoryCompaction = + MemoryCompactionPolicy.valueOf( CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); public TestOptions() {} @@ -978,11 +978,11 @@ public class PerformanceEvaluation extends Configured implements Tool { this.addColumns = addColumns; } - public void setInMemoryCompaction(HColumnDescriptor.MemoryCompaction inMemoryCompaction) { + public void setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { this.inMemoryCompaction = inMemoryCompaction; } - public HColumnDescriptor.MemoryCompaction getInMemoryCompaction() { + public MemoryCompactionPolicy getInMemoryCompaction() { return this.inMemoryCompaction; } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java index 0c86145..47c306b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestAcidGuarantees.java @@ -102,7 +102,7 @@ public class TestAcidGuarantees implements Tool { ConstantSizeRegionSplitPolicy.class.getName()); conf.setInt("hfile.format.version", 3); // for mob tests conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); util = new HBaseTestingUtility(conf); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java index 2df3c87..eb2fa92 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java @@ -251,7 +251,7 @@ public class TestIOFencing { // Compact quickly after we tell it to! 
c.setInt("hbase.regionserver.thread.splitcompactcheckfrequency", 1000); c.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); LOG.info("Starting mini cluster"); TEST_UTIL.startMiniCluster(1); CompactionBlockerRegion compactingRegion = null; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java index 1eaf011..c43b319 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java @@ -33,12 +33,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.Stoppable; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.regionserver.CompactingMemStore; @@ -96,7 +91,7 @@ public class TestHFileArchiving { ConstantSizeRegionSplitPolicy.class.getName()); // no memory compaction conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java index 880114a..6b1a74f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java @@ -37,11 +37,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import org.apache.commons.io.IOUtils; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.io.ByteBufferPool; import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -77,10 +73,10 @@ public class TestAsyncTableGetMultiThreaded { @BeforeClass public static void setUp() throws Exception { - setUp(HColumnDescriptor.MemoryCompaction.NONE); + setUp(MemoryCompactionPolicy.NONE); } - protected static void setUp(HColumnDescriptor.MemoryCompaction memoryCompaction) + protected static void setUp(MemoryCompactionPolicy memoryCompaction) throws Exception { TEST_UTIL.getConfiguration().set(TABLES_ON_MASTER, "none"); TEST_UTIL.getConfiguration().setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 60000L); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java index eb07875..8743266 100644 --- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.BeforeClass; @@ -29,7 +30,7 @@ public class TestAsyncTableGetMultiThreadedWithBasicCompaction extends @BeforeClass public static void setUp() throws Exception { - setUp(HColumnDescriptor.MemoryCompaction.BASIC); + setUp(MemoryCompactionPolicy.BASIC); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java index 6fe8045..ef75373 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.testclassification.ClientTests; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.junit.BeforeClass; @@ -29,7 +30,7 @@ public class TestAsyncTableGetMultiThreadedWithEagerCompaction extends @BeforeClass public static void setUp() throws Exception { - setUp(HColumnDescriptor.MemoryCompaction.EAGER); + setUp(MemoryCompactionPolicy.EAGER); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java index a48aafd..05790c3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMobSnapshotFromClient.java @@ -21,6 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.mob.MobConstants; import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils; @@ -53,7 +54,7 @@ public class TestMobSnapshotFromClient extends TestSnapshotFromClient { TestSnapshotFromClient.setupConf(conf); conf.setInt(MobConstants.MOB_FILE_CACHE_SIZE_KEY, 0); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); } @Override diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java index 5863220..4111a69 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotCloneIndependence.java @@ -24,13 +24,7 @@ import 
org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CategoryBasedTimeout; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; @@ -119,7 +113,7 @@ public class TestSnapshotCloneIndependence { // the back reference itself. conf.setInt("hbase.master.hfilecleaner.ttl", CLEANER_INTERVAL); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); } @Before diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java index dcf8ba8..c08de4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromClient.java @@ -30,12 +30,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.master.snapshot.SnapshotManager; import org.apache.hadoop.hbase.regionserver.CompactingMemStore; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; @@ -99,7 +94,7 @@ public class TestSnapshotFromClient { conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java index 94b2bc1..551b047 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java @@ -36,19 +36,7 @@ import java.util.concurrent.Future; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.InterProcessLock; -import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.hbase.ScheduledChore; -import org.apache.hadoop.hbase.ServerName; -import 
org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotDisabledException; -import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver; import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; @@ -102,7 +90,7 @@ public class TestTableLockManager { @Before public void setUp() throws IOException { TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java index 511b3de..1aa454f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java @@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.MemoryCompactionPolicy; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.master.TableLockManager; import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure; @@ -58,7 +59,7 @@ public class TestMasterProcedureSchedulerConcurrency { public void setUp() throws IOException { conf = HBaseConfiguration.create(); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager()); queue.start(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index 0aa2814..65ad956 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -26,15 +26,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.KeepDeletedCells; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueTestUtil; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.util.MemorySizeUtil; import org.apache.hadoop.hbase.testclassification.MediumTests; @@ -82,7 +74,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void setUp() throws Exception { compactingSetUp(); this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, - store, regionServicesForStores, 
HColumnDescriptor.MemoryCompaction.EAGER); + store, regionServicesForStores, MemoryCompactionPolicy.EAGER); } protected void compactingSetUp() throws Exception { @@ -136,7 +128,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { // use case 3: first in snapshot second in kvset this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, store, regionServicesForStores, - HColumnDescriptor.MemoryCompaction.EAGER); + MemoryCompactionPolicy.EAGER); this.memstore.add(kv1.clone(), null); // As compaction is starting in the background the repetition // of the k1 might be removed BUT the scanners created earlier @@ -475,7 +467,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { throws IOException { // set memstore to do data compaction and not to use the speculative scan - HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER; + MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType)); ((CompactingMemStore)memstore).initiateType(compactionType); @@ -562,7 +554,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void testCompaction1Bucket() throws IOException { // set memstore to do data compaction and not to use the speculative scan - HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER; + MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType)); ((CompactingMemStore)memstore).initiateType(compactionType); @@ -599,7 +591,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void testCompaction2Buckets() throws IOException { // set memstore to do data compaction and not to use the speculative scan - HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER; + MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType)); ((CompactingMemStore)memstore).initiateType(compactionType); @@ -654,7 +646,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore { public void testCompaction3Buckets() throws IOException { // set memstore to do data compaction and not to use the speculative scan - HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.EAGER; + MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.EAGER; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType)); ((CompactingMemStore)memstore).initiateType(compactionType); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java index bc6f982..748576c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java @@ -64,11 +64,11 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore // set memstore to do data compaction conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - 
String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); + String.valueOf(MemoryCompactionPolicy.EAGER)); this.memstore = new CompactingMemStore(conf, CellComparator.COMPARATOR, store, - regionServicesForStores, HColumnDescriptor.MemoryCompaction.EAGER); + regionServicesForStores, MemoryCompactionPolicy.EAGER); } ////////////////////////////////////////////////////////////////////////////// @@ -267,7 +267,7 @@ public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore String[] keys2 = { "A", "B", "D", "G", "I", "J"}; String[] keys3 = { "D", "B", "B", "E" }; - HColumnDescriptor.MemoryCompaction compactionType = HColumnDescriptor.MemoryCompaction.BASIC; + MemoryCompactionPolicy compactionType = MemoryCompactionPolicy.BASIC; memstore.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, String.valueOf(compactionType)); ((CompactingMemStore)memstore).initiateType(compactionType); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index 49f491b..f254792 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -43,13 +43,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.ChoreService; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestCase; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -109,7 +103,7 @@ public class TestCompaction { conf.set(CompactionThroughputControllerFactory.HBASE_THROUGHPUT_CONTROLLER_KEY, NoLimitThroughputController.class.getName()); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); secondRowBytes = START_KEY_BYTES.clone(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java index 381a73f..19ed232 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMajorCompaction.java @@ -37,13 +37,7 @@ import java.util.Map.Entry; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseTestCase; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import 
org.apache.hadoop.hbase.client.Result; @@ -96,7 +90,7 @@ public class TestMajorCompaction { conf.setInt(HConstants.HREGION_MEMSTORE_BLOCK_MULTIPLIER, 100); compactionThreshold = conf.getInt("hbase.hstore.compactionThreshold", 3); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); secondRowBytes = START_KEY_BYTES.clone(); // Increment the least significant character so we get to next row. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index 295f47a..839e5e5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -22,16 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.Waiter; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -86,7 +77,7 @@ public class TestPerColumnFamilyFlush { @Before public void setUp() throws IOException { TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); } private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException { @@ -137,7 +128,7 @@ public class TestPerColumnFamilyFlush { conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 40 * 1024); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); // Intialize the region Region region = initHRegion("testSelectiveFlushWithDataCompaction", conf); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java index e619a9a..553075f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java @@ -28,16 +28,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellComparator; -import org.apache.hadoop.hbase.CellScanner; -import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import 
org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.regionserver.wal.WALEdit; @@ -75,7 +66,7 @@ public class TestRecoveredEdits { // Set it so we flush every 1M or so. Thats a lot. conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay. final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f"; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 8215d53..f2da776 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -22,13 +22,7 @@ import java.util.Arrays; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; @@ -72,10 +66,10 @@ public class TestWalAndCompactingMemStoreFlush { HColumnDescriptor hcd = new HColumnDescriptor(family); // even column families are going to have compacted memstore if(i%2 == 0) { - hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf( + hcd.setInMemoryCompaction(MemoryCompactionPolicy.valueOf( conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY))); } else { - hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE); + hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE); } htd.addFamily(hcd); i++; @@ -139,7 +133,7 @@ public class TestWalAndCompactingMemStoreFlush { conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25); // set memstore to do data compaction conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); + String.valueOf(MemoryCompactionPolicy.EAGER)); // Intialize the region Region region = initHRegion("testSelectiveFlushWithEager", conf); @@ -375,7 +369,7 @@ public class TestWalAndCompactingMemStoreFlush { conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to index-compaction conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC)); + String.valueOf(MemoryCompactionPolicy.BASIC)); // Initialize the region Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf); @@ -628,7 +622,7 @@ public class TestWalAndCompactingMemStoreFlush { 
conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to do data compaction and not to use the speculative scan conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER)); + String.valueOf(MemoryCompactionPolicy.EAGER)); // Intialize the HRegion HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); @@ -763,7 +757,7 @@ public class TestWalAndCompactingMemStoreFlush { conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to do data compaction and not to use the speculative scan conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC)); + String.valueOf(MemoryCompactionPolicy.BASIC)); // Intialize the HRegion HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); @@ -887,7 +881,7 @@ public class TestWalAndCompactingMemStoreFlush { conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // set memstore to do data compaction and not to use the speculative scan conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.BASIC)); + String.valueOf(MemoryCompactionPolicy.BASIC)); // Successfully initialize the HRegion HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java index 1923b4a..a4acc4f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java @@ -27,14 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -116,7 +109,7 @@ public abstract class AbstractTestLogRolling { conf.setInt(HConstants.THREAD_WAKE_FREQUENCY, 2 * 1000); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); } @Before diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java index 84bdc69..ceefc98 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java @@ -51,19 +51,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hbase.Cell; 
-import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.MasterNotRunningException; -import org.apache.hadoop.hbase.MiniHBaseCluster; -import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -140,7 +128,7 @@ public abstract class AbstractTestWALReplay { // The below config supported by 0.20-append and CDH3b2 conf.setInt("dfs.client.block.recovery.retries", 2); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); TEST_UTIL.startMiniCluster(3); Path hbaseRootDir = TEST_UTIL.getDFSCluster().getFileSystem().makeQualified(new Path("/hbase")); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java index 92071d1..8509c9d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestFlushSnapshotFromClient.java @@ -34,13 +34,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.CategoryBasedTimeout; -import org.apache.hadoop.hbase.HBaseTestingUtility; -import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.TableName; -import org.apache.hadoop.hbase.TableNotFoundException; +import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.SnapshotType; import org.apache.hadoop.hbase.client.Table; @@ -107,7 +101,7 @@ public class TestFlushSnapshotFromClient { conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, ConstantSizeRegionSplitPolicy.class.getName()); conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY, - String.valueOf(HColumnDescriptor.MemoryCompaction.NONE)); + String.valueOf(MemoryCompactionPolicy.NONE)); } @Before diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index f191674..0718627 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -816,7 +816,7 @@ module Hbase family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE) family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY) family.setInMemoryCompaction( - org.apache.hadoop.hbase.HColumnDescriptor.MemoryCompaction.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if 
arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION) + org.apache.hadoop.hbase.MemoryCompactionPolicy.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION) family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL) family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING) family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE) -- 1.8.5.2 (Apple Git-48)
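
Usage sketch (illustrative only, not part of the diff above). It exercises the per-family setter/getter and the cluster-wide configuration key this patch touches; the table name, family name, and wrapper class are made up for the example:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionExample {
      public static void main(String[] args) {
        // Per-column-family policy: the enum now lives in org.apache.hadoop.hbase,
        // so callers reference MemoryCompactionPolicy instead of the old nested
        // HColumnDescriptor.MemoryCompaction.
        HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"));
        hcd.setInMemoryCompaction(MemoryCompactionPolicy.BASIC);

        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
        htd.addFamily(hcd);

        // getInMemoryCompaction() still returns null when the family carries no
        // explicit setting, leaving the decision to the cluster-wide default.
        MemoryCompactionPolicy perFamily = hcd.getInMemoryCompaction();
        System.out.println("per-family policy: " + perFamily);

        // Cluster-wide default, stored as the enum's string form under
        // hbase.hregion.compacting.memstore.type.
        Configuration conf = HBaseConfiguration.create();
        conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
            String.valueOf(MemoryCompactionPolicy.EAGER));
      }
    }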
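
A second sketch of how the two knobs interact, paraphrasing the HStore constructor hunk above; resolve() is a hypothetical helper written for illustration, not a method this patch adds:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.regionserver.CompactingMemStore;

    public class PolicyResolution {
      /**
       * Mirrors the resolution order in the HStore constructor after this patch:
       * a policy set on the column family wins; otherwise the cluster-wide
       * hbase.hregion.compacting.memstore.type setting applies, falling back to
       * COMPACTING_MEMSTORE_TYPE_DEFAULT (NONE) when the key is unset.
       */
      static MemoryCompactionPolicy resolve(HColumnDescriptor family, Configuration conf) {
        MemoryCompactionPolicy policy = family.getInMemoryCompaction();
        if (policy == null) {
          policy = MemoryCompactionPolicy.valueOf(
              conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
                  CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT));
        }
        return policy;
      }
    }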