From 2e54ad944324ae87667332979c4c28d88344c68e Mon Sep 17 00:00:00 2001
From: zhangduo
Date: Tue, 1 Mar 2016 20:45:08 +0800
Subject: [PATCH] HBASE-15368 Add relative window support

---
 .../compactions/CompactionConfiguration.java       |  18 +-
 .../compactions/DateTieredCompactionPolicy.java    |  98 +++------
 .../compactions/DateTieredWindowFactory.java       |  50 +++++
 .../compactions/FixedDateTieredWindowFactory.java  |  75 ++++++++
 .../RelativeDateTieredWindowFactory.java           |  66 +++++++
 .../regionserver/TestDateTieredCompaction.java     | 211 ---------------------
 .../TestDateTieredCompactionPolicy.java            |  79 ++++++++
 .../TestDateTieredCompactionWithFixedWindow.java   | 166 ++++++++++++++++
 ...TestDateTieredCompactionWithRelativeWindow.java | 105 ++++++++++
 9 files changed, 584 insertions(+), 284 deletions(-)
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredWindowFactory.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FixedDateTieredWindowFactory.java
 create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RelativeDateTieredWindowFactory.java
 delete mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompaction.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionWithFixedWindow.java
 create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionWithRelativeWindow.java

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index 9bb4c77..6a76c4d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -84,6 +84,12 @@ public class CompactionConfiguration {
   private static final Class<? extends RatioBasedCompactionPolicy>
       DEFAULT_TIER_COMPACTION_POLICY_CLASS = ExploringCompactionPolicy.class;
 
+  public static final String WINDOW_FACTORY_CLASS =
+      "hbase.hstore.compaction.date.tiered.window.factory.class";
+
+  private static final Class<? extends DateTieredWindowFactory>
+      DEFAULT_WINDOW_FACTORY_CLASS = FixedDateTieredWindowFactory.class;
+
   Configuration conf;
   StoreConfigInformation storeConfigInfo;
 
@@ -105,6 +111,7 @@ public class CompactionConfiguration {
   private final int windowsPerTier;
   private final int incomingWindowMin;
   private final String compactionPolicyForTieredWindow;
+  private final String windowFactory;
 
   CompactionConfiguration(Configuration conf, StoreConfigInformation storeConfigInfo) {
     this.conf = conf;
@@ -134,6 +141,7 @@ public class CompactionConfiguration {
     incomingWindowMin = conf.getInt(INCOMING_WINDOW_MIN_KEY, 6);
     compactionPolicyForTieredWindow = conf.get(COMPACTION_POLICY_CLASS_FOR_TIERED_WINDOWS_KEY,
       DEFAULT_TIER_COMPACTION_POLICY_CLASS.getName());
+    windowFactory = conf.get(WINDOW_FACTORY_CLASS, DEFAULT_WINDOW_FACTORY_CLASS.getName());
     LOG.info(this);
   }
 
@@ -143,7 +151,7 @@ public class CompactionConfiguration {
       "size [%d, %d, %d); files [%d, %d); ratio %f; off-peak ratio %f; throttle point %d;"
       + " major period %d, major jitter %f, min locality to compact %f;"
       + " tiered compaction: max_age %d, base window in milliseconds %d, windows per tier %d,"
-      + "incoming window min %d",
+      + "incoming window min %d, compaction policy per window %s, window factory %s",
       minCompactSize,
       maxCompactSize,
       offPeakMaxCompactSize,
@@ -158,7 +166,9 @@ public class CompactionConfiguration {
       maxStoreFileAgeMillis,
       baseWindowMillis,
       windowsPerTier,
-      incomingWindowMin);
+      incomingWindowMin,
+      compactionPolicyForTieredWindow,
+      windowFactory);
   }
 
   /**
@@ -274,4 +284,8 @@ public class CompactionConfiguration {
   public String getCompactionPolicyForTieredWindow() {
     return compactionPolicyForTieredWindow;
   }
+
+  public String getWindowFactory() {
+    return windowFactory;
+  }
 }
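A note on usage: the window strategy is now chosen purely by configuration, so switching factories needs no code change. A minimal sketch of how a deployment would opt into the relative windows added by this patch (programmatic form shown for brevity; setting the key in hbase-site.xml would work equally well):

    Configuration conf = HBaseConfiguration.create();
    // pick the date-tiered policy for the store engine (same key the tests below use)
    conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
      DateTieredCompactionPolicy.class.getName());
    // swap the default fixed windows for the relative ones added by this patch
    conf.set(CompactionConfiguration.WINDOW_FACTORY_CLASS,
      RelativeDateTieredWindowFactory.class.getName());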
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
index 16b534c..98ff3ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
@@ -17,14 +17,6 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
-import com.google.common.collect.PeekingIterator;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -38,10 +30,19 @@ import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.compactions.DateTieredWindowFactory.Window;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Predicate;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.PeekingIterator;
+
 /**
  * HBASE-15181 This is a simple implementation of date-based tiered compaction similar to
  * Cassandra's for the following benefits:
@@ -60,7 +61,9 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
 public class DateTieredCompactionPolicy extends RatioBasedCompactionPolicy {
   private static final Log LOG = LogFactory.getLog(DateTieredCompactionPolicy.class);
 
-  private RatioBasedCompactionPolicy compactionPolicyPerWindow;
+  private final RatioBasedCompactionPolicy compactionPolicyPerWindow;
+
+  private final DateTieredWindowFactory windowFactory;
 
   public DateTieredCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo)
       throws IOException {
@@ -74,6 +77,13 @@ public class DateTieredCompactionPolicy extends RatioBasedCompactionPolicy {
       throw new IOException("Unable to load configured compaction policy '"
           + comConf.getCompactionPolicyForTieredWindow() + "'", e);
     }
+    try {
+      this.windowFactory = ReflectionUtils.instantiateWithCustomCtor(comConf.getWindowFactory(),
+        new Class[] { CompactionConfiguration.class }, new Object[] { comConf });
+    } catch (Exception e) {
+      throw new IOException(
+          "Unable to load configured window factory '" + comConf.getWindowFactory() + "'", e);
+    }
   }
 
   @Override
@@ -132,12 +142,10 @@ public class DateTieredCompactionPolicy extends RatioBasedCompactionPolicy {
         filterOldStoreFiles(Lists.newArrayList(candidates), comConf.getMaxStoreFileAgeMillis(), now);
 
     List<ArrayList<StoreFile>> buckets =
-        partitionFilesToBuckets(candidatesInWindow, comConf.getBaseWindowMillis(),
-          comConf.getWindowsPerTier(), now);
+        partitionFilesToBuckets(candidatesInWindow, windowFactory, now);
     LOG.debug("Compaction buckets are: " + buckets);
 
-    return newestBucket(buckets, comConf.getIncomingWindowMin(), now,
-      comConf.getBaseWindowMillis(), mayUseOffPeak);
+    return newestBucket(buckets, comConf.getIncomingWindowMin(), now, mayUseOffPeak);
   }
 
   /**
@@ -150,9 +158,9 @@ public class DateTieredCompactionPolicy extends RatioBasedCompactionPolicy {
    * @throws IOException error
    */
   private ArrayList<StoreFile> newestBucket(List<ArrayList<StoreFile>> buckets,
-      int incomingWindowThreshold, long now, long baseWindowMillis, boolean mayUseOffPeak)
+      int incomingWindowThreshold, long now, boolean mayUseOffPeak)
       throws IOException {
-    Window incomingWindow = getInitialWindow(now, baseWindowMillis);
+    Window incomingWindow = windowFactory.getInitialWindow(now);
     for (ArrayList<StoreFile> bucket : buckets) {
       int minThreshold =
           incomingWindow.compareToTimestamp(bucket.get(0).getMaximumTimestamp()) <= 0 ? comConf
@@ -172,13 +180,12 @@ public class DateTieredCompactionPolicy extends RatioBasedCompactionPolicy {
    * current file has a maxTimestamp older than last known maximum, treat this file as it carries
    * the last known maximum. This way both seqId and timestamp are in the same order. If files carry
    * the same maxTimestamps, they are ordered by seqId. We then reverse the list so they are ordered
-   * by seqId and maxTimestamp in decending order and build the time windows. All the out-of-order
+   * by seqId and maxTimestamp in descending order and build the time windows. All the out-of-order
    * data into the same compaction windows, guaranteeing contiguous compaction based on sequence id.
    */
-  private static List<ArrayList<StoreFile>> partitionFilesToBuckets(Iterable<StoreFile> storeFiles,
-      long baseWindowSizeMillis, int windowsPerTier, long now) {
+  private static List<ArrayList<StoreFile>> partitionFilesToBuckets(Iterable<StoreFile> storeFiles,
+      DateTieredWindowFactory windowFactory, long now) {
     List<ArrayList<StoreFile>> buckets = Lists.newArrayList();
-    Window window = getInitialWindow(now, baseWindowSizeMillis);
+    Window window = windowFactory.getInitialWindow(now);
 
     List<Pair<StoreFile, Long>> storefileMaxTimestampPairs =
         Lists.newArrayListWithCapacity(Iterables.size(storeFiles));
@@ -198,7 +205,7 @@ public class DateTieredCompactionPolicy extends RatioBasedCompactionPolicy {
       int compResult = window.compareToTimestamp(it.peek().getSecond());
       if (compResult > 0) {
         // If the file is too old for the window, switch to the next window
-        window = window.nextWindow(windowsPerTier);
+        window = window.nextWindow();
       } else {
         // The file is within the target window
         ArrayList<StoreFile> bucket = Lists.newArrayList();
@@ -240,55 +247,4 @@ public class DateTieredCompactionPolicy extends RatioBasedCompactionPolicy {
       }
     });
   }
-
-  private static Window getInitialWindow(long now, long timeUnit) {
-    return new Window(timeUnit, now / timeUnit);
-  }
-
-  /**
-   * This is the class we use to partition from epoch time to now into tiers of exponential sizes of
-   * windows.
-   */
-  private static final class Window {
-    /**
-     * How big a range of timestamps fit inside the window in milliseconds.
-     */
-    private final long windowMillis;
-
-    /**
-     * A timestamp t is within the window iff t / size == divPosition.
-     */
-    private final long divPosition;
-
-    private Window(long baseWindowMillis, long divPosition) {
-      this.windowMillis = baseWindowMillis;
-      this.divPosition = divPosition;
-    }
-
-    /**
-     * Compares the window to a timestamp.
-     * @param timestamp the timestamp to compare.
-     * @return a negative integer, zero, or a positive integer as the window lies before, covering,
-     *         or after than the timestamp.
-     */
-    public int compareToTimestamp(long timestamp) {
-      long pos = timestamp / windowMillis;
-      return divPosition == pos ? 0 : divPosition < pos ? -1 : 1;
-    }
-
-    /**
-     * Move to the new window of the same tier or of the next tier, which represents an earlier time
-     * span.
-     * @param windowsPerTier The number of contiguous windows that will have the same size. Windows
-     *          following those will be tierBase times as big.
-     * @return The next window
-     */
-    public Window nextWindow(int windowsPerTier) {
-      if (divPosition % windowsPerTier > 0) {
-        return new Window(windowMillis, divPosition - 1);
-      } else {
-        return new Window(windowMillis * windowsPerTier, divPosition / windowsPerTier - 1);
-      }
-    }
-  }
 }
\ No newline at end of file
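For reviewers who want to see the new seam in isolation: the policy now only ever touches windows through the two interface methods. A standalone sketch of the walk that partitionFilesToBuckets performs, with made-up timestamps and a CompactionConfiguration comConf assumed to carry the 6 ms base window used by the tests in this patch:

    DateTieredWindowFactory factory = new FixedDateTieredWindowFactory(comConf);
    long now = 16;
    Window window = factory.getInitialWindow(now);
    for (long maxTimestamp : new long[] { 15, 14, 9, 3 }) { // files, newest first
      while (window.compareToTimestamp(maxTimestamp) > 0) {
        window = window.nextWindow(); // window is newer than the file: step back in time
      }
      // the file now falls into (or ahead of) the current window and is bucketed here
    }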
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredWindowFactory.java
new file mode 100644
index 0000000..f56e43e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredWindowFactory.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.compactions;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Factory used to generate tiered windows.
+ */
+@InterfaceAudience.Private
+public interface DateTieredWindowFactory {
+
+  /**
+   * This is the interface we use to partition from epoch time to now into tiers of exponentially
+   * sized windows.
+   */
+  interface Window {
+    /**
+     * Compares the window to a timestamp.
+     * @param timestamp the timestamp to compare.
+     * @return a negative integer, zero, or a positive integer as the window lies before, covers,
+     *         or lies after the timestamp.
+     */
+    int compareToTimestamp(long timestamp);
+
+    /**
+     * Move to the new window of the same tier or of the next tier, which represents an earlier
+     * time span.
+     * @return The next window
+     */
+    Window nextWindow();
+  }
+
+  Window getInitialWindow(long now);
+}
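Since the factory is the new extension point, a third strategy only needs these two methods plus the one-argument constructor that DateTieredCompactionPolicy instantiates reflectively. A hypothetical example (CalendarDayWindowFactory is not part of this patch) that puts every UTC day in its own window, with no tiering at all:

    public class CalendarDayWindowFactory implements DateTieredWindowFactory {
      private static final long DAY_MS = 24L * 60 * 60 * 1000;

      private static final class DayWindow implements Window {
        private final long day;
        DayWindow(long day) { this.day = day; }
        @Override
        public int compareToTimestamp(long timestamp) {
          long pos = timestamp / DAY_MS;
          return day == pos ? 0 : day < pos ? -1 : 1;
        }
        @Override
        public Window nextWindow() {
          return new DayWindow(day - 1); // same size forever: no tiering
        }
      }

      public CalendarDayWindowFactory(CompactionConfiguration comConf) {
        // the policy instantiates factories through this ctor shape; nothing is used here
      }

      @Override
      public Window getInitialWindow(long now) {
        return new DayWindow(now / DAY_MS);
      }
    }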
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FixedDateTieredWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FixedDateTieredWindowFactory.java
new file mode 100644
index 0000000..16491ca
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FixedDateTieredWindowFactory.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.compactions;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Generates windows whose boundaries are fixed, aligned to epoch time.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class FixedDateTieredWindowFactory implements DateTieredWindowFactory {
+
+  private final class FixedWindow implements Window {
+    /**
+     * How big a range of timestamps fit inside the window in milliseconds.
+     */
+    private final long windowMillis;
+
+    /**
+     * A timestamp t is within the window iff t / size == divPosition.
+     */
+    private final long divPosition;
+
+    private FixedWindow(long baseWindowMillis, long divPosition) {
+      this.windowMillis = baseWindowMillis;
+      this.divPosition = divPosition;
+    }
+
+    @Override
+    public int compareToTimestamp(long timestamp) {
+      long pos = timestamp / windowMillis;
+      return divPosition == pos ? 0 : divPosition < pos ? -1 : 1;
+    }
+
+    @Override
+    public FixedWindow nextWindow() {
+      if (divPosition % windowsPerTier > 0) {
+        return new FixedWindow(windowMillis, divPosition - 1);
+      } else {
+        return new FixedWindow(windowMillis * windowsPerTier, divPosition / windowsPerTier - 1);
+      }
+    }
+  }
+
+  private final long baseWindowSizeMillis;
+
+  private final int windowsPerTier;
+
+  public FixedDateTieredWindowFactory(CompactionConfiguration comConf) {
+    this.baseWindowSizeMillis = comConf.getBaseWindowMillis();
+    this.windowsPerTier = comConf.getWindowsPerTier();
+  }
+
+  @Override
+  public FixedWindow getInitialWindow(long now) {
+    return new FixedWindow(baseWindowSizeMillis, now / baseWindowSizeMillis);
+  }
+
+}
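This preserves the behavior of the old inline Window class: windows in the same tier share a size, and each tier boundary multiplies the size by windowsPerTier. A worked trace, using the values from the test configuration in this patch (base window 6 ms, 4 windows per tier, now = 16), with comConf assumed to carry that configuration:

    Window w = new FixedDateTieredWindowFactory(comConf).getInitialWindow(16); // covers [12, 18)
    w = w.nextWindow(); // [6, 12)   same tier: divPosition 2 -> 1
    w = w.nextWindow(); // [0, 6)    same tier: divPosition 1 -> 0
    w = w.nextWindow(); // [-24, 0)  divPosition 0 is a tier boundary: size grows to 6 * 4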
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RelativeDateTieredWindowFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RelativeDateTieredWindowFactory.java
new file mode 100644
index 0000000..98e32c6
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/RelativeDateTieredWindowFactory.java
@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.compactions;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Generates windows whose boundaries are relative to now.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class RelativeDateTieredWindowFactory implements DateTieredWindowFactory {
+
+  private final class RelativeWindow implements Window {
+
+    private final long windowStartMillis;
+
+    private final long windowMillis;
+
+    private RelativeWindow(long windowStartMillis, long windowMillis) {
+      this.windowStartMillis = windowStartMillis;
+      this.windowMillis = windowMillis;
+    }
+
+    @Override
+    public int compareToTimestamp(long timestamp) {
+      long delta = timestamp - windowStartMillis;
+      return delta < 0 ? 1 : delta < windowMillis ? 0 : -1;
+    }
+
+    @Override
+    public Window nextWindow() {
+      long nextWindowMillis = windowMillis * times;
+      return new RelativeWindow(windowStartMillis - nextWindowMillis, nextWindowMillis);
+    }
+  }
+
+  private final long baseWindowSizeMillis;
+
+  private final int times;
+
+  public RelativeDateTieredWindowFactory(CompactionConfiguration conf) {
+    this.baseWindowSizeMillis = conf.getBaseWindowMillis();
+    this.times = conf.getMinFilesToCompact();
+  }
+
+  @Override
+  public Window getInitialWindow(long now) {
+    return new RelativeWindow(now - baseWindowSizeMillis, baseWindowSizeMillis);
+  }
+}
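Worth calling out in review: the growth factor `times` is read from getMinFilesToCompact() rather than from a dedicated key, so window growth is tied to the per-window minimum file count. Unlike the fixed factory, the boundaries here move with now, so the same file can land in a different window on a later compaction run. A worked trace with the test configuration (base window 6 ms, min files to compact 2), again assuming comConf carries those values:

    Window w = new RelativeDateTieredWindowFactory(comConf).getInitialWindow(now); // [now - 6, now)
    w = w.nextWindow(); // [now - 18, now - 6)   size 12
    w = w.nextWindow(); // [now - 42, now - 18)  size 24, each window `times` as big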
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompaction.java
deleted file mode 100644
index cfb54b7..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompaction.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
-import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestDateTieredCompaction extends TestCompactionPolicy {
-  ArrayList<StoreFile> sfCreate(long[] minTimestamps, long[] maxTimestamps, long[] sizes)
-      throws IOException {
-    ArrayList<Long> ageInDisk = new ArrayList<Long>();
-    for (int i = 0; i < sizes.length; i++) {
-      ageInDisk.add(0L);
-    }
-
-    ArrayList<StoreFile> ret = Lists.newArrayList();
-    for (int i = 0; i < sizes.length; i++) {
-      MockStoreFile msf =
-          new MockStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), false, i);
-      msf.setTimeRangeTracker(new TimeRangeTracker(minTimestamps[i], maxTimestamps[i]));
-      ret.add(msf);
-    }
-    return ret;
-  }
-
-  @Override
-  protected void config() {
-    super.config();
-
-    // Set up policy
-    conf.setLong(CompactionConfiguration.MAX_AGE_MILLIS_KEY, 100);
-    conf.setLong(CompactionConfiguration.INCOMING_WINDOW_MIN_KEY, 3);
-    conf.setLong(CompactionConfiguration.BASE_WINDOW_MILLIS_KEY, 6);
-    conf.setInt(CompactionConfiguration.WINDOWS_PER_TIER_KEY, 4);
-    conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
-      DateTieredCompactionPolicy.class.getName());
-
-    // Special settings for compaction policy per window
-    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 2);
-    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 12);
-    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F);
-  }
-
-  void compactEquals(long now, ArrayList<StoreFile> candidates, long... expected)
-      throws IOException {
-    Assert.assertTrue(((DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy())
-        .needsCompaction(candidates, ImmutableList.<StoreFile> of(), now));
-
-    List<StoreFile> actual =
-        ((DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy())
-            .applyCompactionPolicy(candidates, false, false, now);
-
-    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
-  }
-
-  /**
-   * Test for incoming window
-   * @throws IOException with error
-   */
-  @Test
-  public void incomingWindow() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
-    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11, 12, 13 };
-
-    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 13, 12, 11, 10);
-  }
-
-  /**
-   * Not enough files in incoming window
-   * @throws IOException with error
-   */
-  @Test
-  public void NotIncomingWindow() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 };
-    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11 };
-
-    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 25, 24, 23, 22, 21, 20);
-  }
-
-  /**
-   * Test for file newer than incoming window
-   * @throws IOException with error
-   */
-  @Test
-  public void NewerThanIncomingWindow() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18 };
-    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11, 12, 13 };
-
-    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 13, 12, 11, 10);
-  }
-
-  /**
-   * If there is no T1 window, we don't build 2
-   * @throws IOException with error
-   */
-  @Test
-  public void NoT2() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 44, 60, 61, 92, 95, 100 };
-    long[] sizes = new long[] { 0, 20, 21, 22, 23, 1 };
-
-    compactEquals(100, sfCreate(minTimestamps, maxTimestamps, sizes), 23, 22);
-  }
-
-  @Test
-  public void T1() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 120, 124, 143, 145, 157 };
-    long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 30, 31, 32, 2, 1 };
-
-    compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), 32, 31, 30);
-  }
-
-  /**
-   * Apply exploring logic on non-incoming window
-   * @throws IOException with error
-   */
-  @Test
-  public void RatioT0() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
-    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 280, 23, 24, 1 };
-
-    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 22, 21, 20);
-  }
-
-  /**
-   * Also apply ratio-based logic on t2 window
-   * @throws IOException with error
-   */
-  @Test
-  public void RatioT2() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 120, 124, 143, 145, 157 };
-    long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 350, 30, 31, 2, 1 };
-
-    compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), 31, 30);
-  }
-
-  /**
-   * The next compaction call after testTieredCompactionRatioT0 is compacted
-   * @throws IOException with error
-   */
-  @Test
-  public void RatioT0Next() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 8, 9, 10, 11, 12 };
-    long[] sizes = new long[] { 30, 31, 32, 33, 34, 22, 280, 23, 24, 1 };
-
-    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 24, 23);
-  }
-
-  /**
-   * Older than now(161) - maxAge(100)
-   * @throws IOException with error
-   */
-  @Test
-  public void olderThanMaxAge() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 105, 106, 113, 145, 157 };
-    long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 };
-
-    compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), 31, 30, 33, 42, 41, 40);
-  }
-
-  /**
-   * Out-of-order data
-   * @throws IOException with error
-   */
-  @Test
-  public void OutOfOrder() throws IOException {
-    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-    long[] maxTimestamps = new long[] { 0, 13, 3, 10, 11, 1, 2, 12, 14, 15 };
-    long[] sizes = new long[] { 30, 31, 32, 33, 34, 22, 28, 23, 24, 1 };
-
-    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 1, 24, 23, 28, 22, 34, 33, 32,
-      31);
-  }
-}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
new file mode 100644
index 0000000..a5dfb15
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionPolicy.java
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionPolicy;
+import org.junit.Assert;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+public class TestDateTieredCompactionPolicy extends TestCompactionPolicy {
+
+  @Override
+  protected void config() {
+    super.config();
+
+    // Set up policy
+    conf.setLong(CompactionConfiguration.MAX_AGE_MILLIS_KEY, 100);
+    conf.setLong(CompactionConfiguration.INCOMING_WINDOW_MIN_KEY, 3);
+    conf.setLong(CompactionConfiguration.BASE_WINDOW_MILLIS_KEY, 6);
+    conf.setInt(CompactionConfiguration.WINDOWS_PER_TIER_KEY, 4);
+    conf.set(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+      DateTieredCompactionPolicy.class.getName());
+
+    // Special settings for compaction policy per window
+    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, 2);
+    this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, 12);
+    this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.2F);
+  }
+
+  protected ArrayList<StoreFile> sfCreate(long[] minTimestamps, long[] maxTimestamps, long[] sizes)
+      throws IOException {
+    ArrayList<Long> ageInDisk = new ArrayList<Long>();
+    for (int i = 0; i < sizes.length; i++) {
+      ageInDisk.add(0L);
+    }
+
+    ArrayList<StoreFile> ret = Lists.newArrayList();
+    for (int i = 0; i < sizes.length; i++) {
+      MockStoreFile msf = new MockStoreFile(TEST_UTIL, TEST_FILE, sizes[i], ageInDisk.get(i), false,
+          i);
+      msf.setTimeRangeTracker(new TimeRangeTracker(minTimestamps[i], maxTimestamps[i]));
+      ret.add(msf);
+    }
+    return ret;
+  }
+
+  protected void compactEquals(long now, ArrayList<StoreFile> candidates, long... expected)
+      throws IOException {
+    Assert.assertTrue(((DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy())
+        .needsCompaction(candidates, ImmutableList.<StoreFile> of(), now));
+
+    List<StoreFile> actual = ((DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy())
+        .applyCompactionPolicy(candidates, false, false, now);
+
+    Assert.assertEquals(Arrays.toString(expected), Arrays.toString(getSizes(actual)));
+  }
+}
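A note for readers of the test data that follows: compactEquals drives the policy directly. It asserts that a compaction is needed at the given now, runs applyCompactionPolicy, and then compares the selected files by size; the sizes double as identifiers because every mock file gets a distinct size. For example, in the incoming-window tests below,

    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 13, 12, 11, 10);

asserts that exactly the four files of sizes 13, 12, 11 and 10 (the newest window) are selected, newest first.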
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionWithFixedWindow.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionWithFixedWindow.java
new file mode 100644
index 0000000..d792d26
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionWithFixedWindow.java
@@ -0,0 +1,166 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+import org.apache.hadoop.hbase.regionserver.compactions.DateTieredWindowFactory;
+import org.apache.hadoop.hbase.regionserver.compactions.FixedDateTieredWindowFactory;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestDateTieredCompactionWithFixedWindow extends TestDateTieredCompactionPolicy {
+
+  @Override
+  protected void config() {
+    super.config();
+    this.conf.setClass(CompactionConfiguration.WINDOW_FACTORY_CLASS,
+      FixedDateTieredWindowFactory.class, DateTieredWindowFactory.class);
+  }
+
+  /**
+   * Test for incoming window
+   * @throws IOException with error
+   */
+  @Test
+  public void incomingWindow() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11, 12, 13 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 13, 12, 11, 10);
+  }
+
+  /**
+   * Not enough files in incoming window
+   * @throws IOException with error
+   */
+  @Test
+  public void NotIncomingWindow() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 25, 24, 23, 22, 21, 20);
+  }
+
+  /**
+   * Test for file newer than incoming window
+   * @throws IOException with error
+   */
+  @Test
+  public void NewerThanIncomingWindow() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 25, 10, 11, 12, 13 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 13, 12, 11, 10);
+  }
+
+  /**
+   * If there is no T1 window, we don't build T2
+   * @throws IOException with error
+   */
+  @Test
+  public void NoT2() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 44, 60, 61, 92, 95, 100 };
+    long[] sizes = new long[] { 0, 20, 21, 22, 23, 1 };
+
+    compactEquals(100, sfCreate(minTimestamps, maxTimestamps, sizes), 23, 22);
+  }
+
+  @Test
+  public void T1() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 120, 124, 143, 145, 157 };
+    long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 30, 31, 32, 2, 1 };
+
+    compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), 32, 31, 30);
+  }
+
+  /**
+   * Apply exploring logic on non-incoming window
+   * @throws IOException with error
+   */
+  @Test
+  public void RatioT0() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 280, 23, 24, 1 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 22, 21, 20);
+  }
+
+  /**
+   * Also apply ratio-based logic on t2 window
+   * @throws IOException with error
+   */
+  @Test
+  public void RatioT2() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 120, 124, 143, 145, 157 };
+    long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 350, 30, 31, 2, 1 };
+
+    compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), 31, 30);
+  }
+
+  /**
+   * The next compaction call after RatioT0 is compacted
+   * @throws IOException with error
+   */
+  @Test
+  public void RatioT0Next() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 8, 9, 10, 11, 12 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 22, 280, 23, 24, 1 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 24, 23);
+  }
+
+  /**
+   * Older than now(161) - maxAge(100)
+   * @throws IOException with error
+   */
+  @Test
+  public void olderThanMaxAge() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 105, 106, 113, 145, 157 };
+    long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 };
+
+    compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), 31, 30, 33, 42, 41, 40);
+  }
+
+  /**
+   * Out-of-order data
+   * @throws IOException with error
+   */
+  @Test
+  public void OutOfOrder() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 0, 13, 3, 10, 11, 1, 2, 12, 14, 15 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 22, 28, 23, 24, 1 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 1, 24, 23, 28, 22, 34, 33, 32,
+      31);
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionWithRelativeWindow.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionWithRelativeWindow.java
new file mode 100644
index 0000000..70944ca
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDateTieredCompactionWithRelativeWindow.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
+import org.apache.hadoop.hbase.regionserver.compactions.DateTieredWindowFactory;
+import org.apache.hadoop.hbase.regionserver.compactions.RelativeDateTieredWindowFactory;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestDateTieredCompactionWithRelativeWindow extends TestDateTieredCompactionPolicy {
+
+  @Override
+  protected void config() {
+    super.config();
+
+    this.conf.setClass(CompactionConfiguration.WINDOW_FACTORY_CLASS,
+      RelativeDateTieredWindowFactory.class, DateTieredWindowFactory.class);
+  }
+
+  /**
+   * Test for incoming window
+   * @throws IOException with error
+   */
+  @Test
+  public void incomingWindow() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 91, 22, 93, 24, 95, 10, 11, 12, 13 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 13, 12, 11, 10);
+  }
+
+  /**
+   * Not enough files in incoming window
+   * @throws IOException with error
+   */
+  @Test
+  public void NotIncomingWindow() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 14, 15, 16, 17, 18, 19, 20 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 125, 10, 11 };
+
+    compactEquals(25, sfCreate(minTimestamps, maxTimestamps, sizes), 24, 23, 22, 21);
+  }
+
+  /**
+   * Test for file newer than incoming window
+   * @throws IOException with error
+   */
+  @Test
+  public void NewerThanIncomingWindow() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 20, 21, 22, 23, 24, 95, 10, 11, 12, 13 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 13, 12, 11, 10);
+  }
+
+  /**
+   * Older than now(161) - maxAge(100)
+   * @throws IOException with error
+   */
+  @Test
+  public void olderThanMaxAge() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 44, 60, 61, 96, 100, 104, 105, 106, 113, 145, 157 };
+    long[] sizes = new long[] { 0, 50, 51, 40, 41, 42, 33, 30, 31, 2, 1 };
+
+    compactEquals(161, sfCreate(minTimestamps, maxTimestamps, sizes), 31, 30, 33, 42, 41, 40);
+  }
+
+  /**
+   * Out-of-order data
+   * @throws IOException with error
+   */
+  @Test
+  public void OutOfOrder() throws IOException {
+    long[] minTimestamps = new long[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+    long[] maxTimestamps = new long[] { 0, 13, 3, 10, 11, 1, 2, 12, 14, 15 };
+    long[] sizes = new long[] { 30, 31, 32, 33, 34, 22, 28, 23, 24, 1 };
+
+    compactEquals(16, sfCreate(minTimestamps, maxTimestamps, sizes), 1, 24, 23, 28, 22, 34, 33, 32,
+      31);
+  }
+}
-- 
1.9.1