From c49345158831f62e408871f40581bc3fe1c51901 Mon Sep 17 00:00:00 2001 From: anastas Date: Mon, 4 Jul 2016 14:28:20 +0300 Subject: [PATCH] My squashed commits --- .../org/apache/hadoop/hbase/util/ClassSize.java | 26 +- .../hbase/regionserver/AbstractMemStore.java | 4 +- .../hadoop/hbase/regionserver/CellArrayMap.java | 55 +++ .../hadoop/hbase/regionserver/CellFlatMap.java | 473 ++++++++++++++++++++ .../hbase/regionserver/CompactingMemStore.java | 63 ++- .../hbase/regionserver/CompactionPipeline.java | 49 +- .../hadoop/hbase/regionserver/DefaultMemStore.java | 2 +- .../hbase/regionserver/ImmutableSegment.java | 172 ++++++- .../hbase/regionserver/MemStoreCompactor.java | 219 +++++---- .../regionserver/MemStoreCompactorIterator.java | 154 +++++++ .../hadoop/hbase/regionserver/MemStoreScanner.java | 68 ++- .../hadoop/hbase/regionserver/MutableSegment.java | 20 +- .../apache/hadoop/hbase/regionserver/Segment.java | 67 ++- .../hadoop/hbase/regionserver/SegmentFactory.java | 30 +- .../hadoop/hbase/regionserver/SegmentScanner.java | 8 +- .../hbase/regionserver/VersionedSegmentsList.java | 12 +- .../org/apache/hadoop/hbase/io/TestHeapSize.java | 17 +- .../hadoop/hbase/regionserver/TestCellFlatSet.java | 143 ++++++ .../hbase/regionserver/TestCompactingMemStore.java | 25 +- .../TestCompactingToCellArrayMapMemStore.java | 250 +++++++++++ .../hadoop/hbase/regionserver/TestHRegion.java | 4 +- .../regionserver/TestHRegionWithInMemoryFlush.java | 101 +++++ .../TestWalAndCompactingMemStoreFlush.java | 14 +- hbase-shell/src/main/ruby/hbase/admin.rb | 2 +- 24 files changed, 1764 insertions(+), 214 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorIterator.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java index 41c93ea..bf4f4b0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java @@ -29,6 +29,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; + /** * Class for determining the "size" of a class, an attempt to calculate the * actual bytes that an object of this class will occupy in memory @@ -81,6 +82,12 @@ public class ClassSize { /** Overhead for ConcurrentSkipListMap Entry */ public static final int CONCURRENT_SKIPLISTMAP_ENTRY; + /** Overhead for CellArrayMap */ + public static final int CELL_ARRAY_MAP; + + /** Overhead for Cell Array Entry */ + public static final int CELL_ARRAY_MAP_ENTRY; + /** Overhead for ReentrantReadWriteLock */ public static final int REENTRANT_LOCK; @@ -106,7 +113,7 @@ public class ClassSize { public static final int TIMERANGE_TRACKER; /** Overhead for CellSkipListSet */ - public static final int CELL_SKIPLIST_SET; + public static final int CELL_SET; public static final int STORE_SERVICES; @@ -262,9 +269,20 @@ public class ClassSize { // The size changes from jdk7 to jdk8, estimate 
the size rather than use a conditional CONCURRENT_SKIPLISTMAP = (int) estimateBase(ConcurrentSkipListMap.class, false); - CONCURRENT_SKIPLISTMAP_ENTRY = + // CELL_ARRAY_MAP is the size of an instance of the CellArrayMap class, which extends + // the CellFlatMap class. A CellArrayMap object contains a reference to an array, so + // OBJECT + REFERENCE + ARRAY + // A CellFlatMap object contains two integers, one boolean and one object reference, so + // 2*INT + BOOLEAN + REFERENCE + CELL_ARRAY_MAP = align(OBJECT + 2*Bytes.SIZEOF_INT + Bytes.SIZEOF_BOOLEAN + + ARRAY + 2*REFERENCE); + + CONCURRENT_SKIPLISTMAP_ENTRY = align( align(OBJECT + (3 * REFERENCE)) + /* one node per entry */ - align((OBJECT + (3 * REFERENCE))/2); /* one index per two entries */ + align((OBJECT + (3 * REFERENCE))/2)); /* one index per two entries */ + + // REFERENCE in the CellArrayMap and the Cell object itself + CELL_ARRAY_MAP_ENTRY = align(OBJECT + 2*REFERENCE + 2*Bytes.SIZEOF_INT); REENTRANT_LOCK = align(OBJECT + (3 * REFERENCE)); @@ -282,7 +300,7 @@ public class ClassSize { TIMERANGE_TRACKER = align(ClassSize.OBJECT + Bytes.SIZEOF_LONG * 2); - CELL_SKIPLIST_SET = align(OBJECT + REFERENCE); + CELL_SET = align(OBJECT + REFERENCE); STORE_SERVICES = align(OBJECT + REFERENCE + ATOMIC_LONG); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java index d25f960..d2b060d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java @@ -64,14 +64,14 @@ public abstract class AbstractMemStore implements MemStore { public final static long DEEP_OVERHEAD = ClassSize.align(FIXED_OVERHEAD + (ClassSize.ATOMIC_LONG + ClassSize.TIMERANGE_TRACKER + - ClassSize.CELL_SKIPLIST_SET + ClassSize.CONCURRENT_SKIPLISTMAP)); + ClassSize.CELL_SET + ClassSize.CONCURRENT_SKIPLISTMAP)); protected AbstractMemStore(final Configuration conf, final CellComparator c) { this.conf = conf; this.comparator = c; resetCellSet(); - this.snapshot = SegmentFactory.instance().createImmutableSegment(conf, c, 0); + this.snapshot = SegmentFactory.instance().createImmutableSegment(c, 0); this.snapshotId = NO_SNAPSHOT_ID; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java new file mode 100644 index 0000000..ee442ef --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java @@ -0,0 +1,55 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.regionserver; + +import java.util.Comparator; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * CellArrayMap is a simple array of Cells and cannot be allocated off-heap. + * As with all Java arrays, the CellArrayMap's array holds references pointing to Cell objects. + */ +@InterfaceAudience.Private +public class CellArrayMap extends CellFlatMap { + + private final Cell[] block; + + /* The Cells array is created only when CellArrayMap is created; all sub-CellBlocks use + * boundary indexes. The given Cell array must be ordered. */ + public CellArrayMap( + Comparator comparator, Cell[] b, int min, int max, boolean descending) { + super(comparator,min,max,descending); + this.block = b; + } + + /* To be used by base class only to create a sub-CellFlatMap */ + @Override + protected CellFlatMap createSubCellFlatMap(Comparator comparator, int min, int max, + boolean descending) { + return new CellArrayMap(comparator, this.block, min, max, descending); + } + + @Override + protected Cell getCell(int i) { + if( (i < minCellIdx) || (i >= maxCellIdx) ) return null; + return block[i]; + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java new file mode 100644 index 0000000..6811659 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java @@ -0,0 +1,473 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +import java.util.Collection; +import java.util.Comparator; +import java.util.concurrent.ConcurrentNavigableMap; +import java.util.Iterator; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; + + + +/** + * CellFlatMap stores a constant number of elements and is immutable after the creation stage. + * Being immutable, the CellFlatMap can be implemented as an array. + * The actual array can be on- or off-heap and is implemented in a concrete class derived + * from CellFlatMap. + * The CellFlatMap uses no synchronization primitives; it is assumed to be created by a + * single thread and can then be read by multiple threads. + * + * The "flat" in the name means that the memory layout of the Map is a sequential array, and thus + * it requires less memory than a ConcurrentSkipListMap. 
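+ *
+ * An illustrative sketch of the intended usage (not part of this patch), assuming an
+ * ordered Cell array wrapped by the CellArrayMap subclass:
+ * <pre>
+ *   // 'cells' must already be ordered by 'comparator'
+ *   CellFlatMap map = new CellArrayMap(comparator, cells, 0, cells.length, false);
+ *   Cell first = map.firstKey();                             // O(1) array access
+ *   ConcurrentNavigableMap tail = map.tailMap(first, true);  // O(log n) boundary-index view, no copying
+ * </pre>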
+ */ +@InterfaceAudience.Private +public abstract class CellFlatMap implements ConcurrentNavigableMap { + + private final Comparator comparator; + protected int minCellIdx = 0; // the index of the minimal cell (for sub-sets) + protected int maxCellIdx = 0; // the index of the cell after the maximal cell (for sub-sets) + private boolean descending = false; + + /* C-tor */ + public CellFlatMap(Comparator comparator, int min, int max, boolean d){ + this.comparator = comparator; + this.minCellIdx = min; + this.maxCellIdx = max; + this.descending = d; + } + + /* Used for abstract CellFlatMap creation, implemented by derived class */ + protected abstract CellFlatMap createSubCellFlatMap(Comparator comparator, int min, + int max, boolean descending); + + /* Returns the i-th cell in the cell block */ + protected abstract Cell getCell(int i); + + /** + * Binary search for a given key between the given boundaries of the array. + * A non-negative return value is the index of the key. + * A negative return value means the key was not found; its absolute value is the + * possible insertion index for the searched key, i.e. (-1 * insertion point). + * @param needle The key to look for in all of the entries + * @return the index of the key if found, otherwise (-1 * insertion point). Note that this + * differs from Arrays.binarySearch, which returns (-(insertion point) - 1). + */ + private int find(Cell needle) { + int begin = minCellIdx; + int end = maxCellIdx - 1; + + while (begin <= end) { + int mid = (begin + end) >>> 1; + Cell midCell = getCell(mid); + int compareRes = comparator.compare(midCell, needle); + + if (compareRes == 0) { + return mid; // 0 means equals. We found the key + } + + if (compareRes < 0) { + // midCell is less than needle so we need to look farther up + begin = mid + 1; + } else { + // midCell is greater than needle so we need to look down + end = mid - 1; + } + } + + return (-1 * begin); + } + + /* Get the index of the key, taking into consideration whether + * the key should be inclusive or exclusive */ + private int getValidIndex(Cell key, boolean inclusive) { + int index = find(key); + if (inclusive && index >= 0) { + index = (descending) ? 
index - 1 : index + 1; + } + index = Math.abs(index); + if (index < minCellIdx || index >= maxCellIdx) { + throw new IllegalArgumentException("Index " + index + + " out of boundary, when looking for key " + key); + } + return index; + } + + @Override + public Comparator comparator() { + return comparator; + } + + @Override + public int size() { + return maxCellIdx-minCellIdx; + } + + @Override + public boolean isEmpty() { + return ( size() == 0 ); + } + + + // ---------------- Sub-Maps ---------------- + @Override + public ConcurrentNavigableMap subMap( Cell fromKey, + boolean fromInclusive, + Cell toKey, + boolean toInclusive) { + int toIndex = getValidIndex(toKey, toInclusive); + int fromIndex = (getValidIndex(fromKey, !fromInclusive)); + + if (fromIndex > toIndex) { + throw new IllegalArgumentException("Inconsistent range, when looking from " + + fromKey + " to " + toKey); + } + return createSubCellFlatMap(comparator, fromIndex, toIndex, descending); + } + + @Override + public ConcurrentNavigableMap headMap(Cell toKey, boolean inclusive) { + int index = getValidIndex(toKey, inclusive); + return createSubCellFlatMap(comparator, minCellIdx, index, descending); + } + + @Override + public ConcurrentNavigableMap tailMap(Cell fromKey, boolean inclusive) { + int index = (getValidIndex(fromKey, !inclusive)); + return createSubCellFlatMap(comparator, index, maxCellIdx, descending); + } + + @Override + public ConcurrentNavigableMap descendingMap() { + return createSubCellFlatMap(comparator, minCellIdx, maxCellIdx, true); + } + + @Override + public ConcurrentNavigableMap subMap(Cell k1, Cell k2) { + return this.subMap(k1, true, k2, true); + } + + @Override + public ConcurrentNavigableMap headMap(Cell k) { + return this.headMap(k, true); + } + + @Override + public ConcurrentNavigableMap tailMap(Cell k) { + return this.tailMap(k, true); + } + + + // -------------------------------- Key's getters -------------------------------- + @Override + public Cell firstKey() { + if (isEmpty()) { + return null; + } + return descending ? getCell(maxCellIdx - 1) : getCell(minCellIdx); + } + + @Override + public Cell lastKey() { + if (isEmpty()) { + return null; + } + return descending ? getCell(minCellIdx) : getCell(maxCellIdx - 1); + } + + @Override + public Cell lowerKey(Cell k) { + if (isEmpty()) { + return null; + } + int index = find(k); + // If index>=0 there's a key exactly equal + index = (index>=0) ? index-1 : -(index); + return (index < minCellIdx || index >= maxCellIdx) ? null : getCell(index); + } + + @Override + public Cell floorKey(Cell k) { + if (isEmpty()) { + return null; + } + int index = find(k); + index = (index>=0) ? index : -(index); + return (index < minCellIdx || index >= maxCellIdx) ? null : getCell(index); + } + + @Override + public Cell ceilingKey(Cell k) { + if (isEmpty()) { + return null; + } + int index = find(k); + index = (index>=0) ? index : -(index)+1; + return (index < minCellIdx || index >= maxCellIdx) ? null : getCell(index); + } + + @Override + public Cell higherKey(Cell k) { + if (isEmpty()) { + return null; + } + int index = find(k); + index = (index>=0) ? index+1 : -(index)+1; + return (index < minCellIdx || index >= maxCellIdx) ? 
null : getCell(index); + } + + @Override + public boolean containsKey(Object o) { + int index = find((Cell) o); + return (index >= 0); + } + + @Override + public boolean containsValue(Object o) { // use containsKey(Object o) instead + throw new UnsupportedOperationException("Use containsKey(Object o) instead"); + } + + @Override + public Cell get(Object o) { + int index = find((Cell) o); + return (index >= 0) ? getCell(index) : null; + } + + // -------------------------------- Entry's getters -------------------------------- + // all interfaces returning Entries are unsupported because we are dealing only with the keys + @Override + public Entry lowerEntry(Cell k) { + throw new UnsupportedOperationException(); + } + + @Override + public Entry higherEntry(Cell k) { + throw new UnsupportedOperationException(); + } + + @Override + public Entry ceilingEntry(Cell k) { + throw new UnsupportedOperationException(); + } + + @Override + public Entry floorEntry(Cell k) { + throw new UnsupportedOperationException(); + } + + @Override + public Entry firstEntry() { + throw new UnsupportedOperationException(); + } + + @Override + public Entry lastEntry() { + throw new UnsupportedOperationException(); + } + + @Override + public Entry pollFirstEntry() { + throw new UnsupportedOperationException(); + } + + @Override + public Entry pollLastEntry() { + throw new UnsupportedOperationException(); + } + + + // -------------------------------- Updates -------------------------------- + // All updating methods below are unsupported. + // Assuming an array of Cells will be allocated externally, + // fill up with Cells and provided in construction time. + // Later the structure is immutable. + @Override + public Cell put(Cell k, Cell v) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public Cell remove(Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean replace(Cell k, Cell v, Cell v1) { + throw new UnsupportedOperationException(); + } + + @Override + public void putAll(Map map) { + throw new UnsupportedOperationException(); + } + + @Override + public Cell putIfAbsent(Cell k, Cell v) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(Object o, Object o1) { + throw new UnsupportedOperationException(); + } + + @Override + public Cell replace(Cell k, Cell v) { + throw new UnsupportedOperationException(); + } + + + // -------------------------------- Sub-Sets -------------------------------- + @Override + public NavigableSet navigableKeySet() { + throw new UnsupportedOperationException(); + } + + @Override + public NavigableSet descendingKeySet() { + throw new UnsupportedOperationException(); + } + + @Override + public NavigableSet keySet() { + throw new UnsupportedOperationException(); + } + + @Override + public Collection values() { + return new CellFlatMapCollection(); + } + + @Override + public Set> entrySet() { + throw new UnsupportedOperationException(); + } + + + // -------------------------------- Iterator K -------------------------------- + private final class CellFlatMapIterator implements Iterator { + int index; + + private CellFlatMapIterator() { + index = descending ? maxCellIdx-1 : minCellIdx; + } + + @Override + public boolean hasNext() { + return descending ? 
(index >= minCellIdx) : (index < maxCellIdx); + } + + @Override + public Cell next() { + Cell result = getCell(index); + if (descending) { + index--; + } else { + index++; + } + return result; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + // -------------------------------- Collection -------------------------------- + private final class CellFlatMapCollection implements Collection { + + @Override + public int size() { + return CellFlatMap.this.size(); + } + + @Override + public boolean isEmpty() { + return CellFlatMap.this.isEmpty(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean contains(Object o) { + return containsKey(o); + } + + @Override + public Iterator iterator() { + return new CellFlatMapIterator(); + } + + @Override + public Object[] toArray() { + throw new UnsupportedOperationException(); + } + + @Override + public T[] toArray(T[] ts) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean add(Cell k) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsAll(Collection collection) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean addAll(Collection collection) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeAll(Collection collection) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(Collection collection) { + throw new UnsupportedOperationException(); + } + + + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java index 0ca8af0..12c02d8 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java @@ -51,9 +51,14 @@ import org.apache.hadoop.hbase.wal.WAL; */ @InterfaceAudience.Private public class CompactingMemStore extends AbstractMemStore { - public final static long DEEP_OVERHEAD_PER_PIPELINE_ITEM = ClassSize.align( + + public final static long DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM = ClassSize.align( + ClassSize.TIMERANGE_TRACKER + ClassSize.CELL_SET + ClassSize.CONCURRENT_SKIPLISTMAP); + + public final static long DEEP_OVERHEAD_PER_PIPELINE_CELL_ARRAY_ITEM = ClassSize.align( ClassSize.TIMERANGE_TRACKER + ClassSize.TIMERANGE + - ClassSize.CELL_SKIPLIST_SET + ClassSize.CONCURRENT_SKIPLISTMAP); + ClassSize.CELL_SET + ClassSize.CELL_ARRAY_MAP); + // Default fraction of in-memory-flush size w.r.t. 
flush-to-disk size public static final String IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY = "hbase.memstore.inmemoryflush.threshold.factor"; @@ -64,8 +69,8 @@ public class CompactingMemStore extends AbstractMemStore { private RegionServicesForStores regionServices; private CompactionPipeline pipeline; private MemStoreCompactor compactor; - // the threshold on active size for in-memory flush - private long inmemoryFlushSize; + + private long inmemoryFlushSize; // the threshold on active size for in-memory flush private final AtomicBoolean inMemoryFlushInProgress = new AtomicBoolean(false); @VisibleForTesting private final AtomicBoolean allowCompaction = new AtomicBoolean(true); @@ -96,7 +101,7 @@ public class CompactingMemStore extends AbstractMemStore { } public static long getSegmentSize(Segment segment) { - return segment.getSize() - DEEP_OVERHEAD_PER_PIPELINE_ITEM; + return segment.getInternalSize(); } public static long getSegmentsSize(List list) { @@ -203,15 +208,24 @@ public class CompactingMemStore extends AbstractMemStore { this.inMemoryFlushInProgress.set(inMemoryFlushInProgress); } - public void swapCompactedSegments(VersionedSegmentsList versionedList, ImmutableSegment result) { - pipeline.swap(versionedList, result); + public boolean swapCompactedSegments(VersionedSegmentsList versionedList, ImmutableSegment result) { + return pipeline.swap(versionedList, result); } - public boolean hasCompactibleSegments() { + /** + * @param requesterVersion The caller must hold the VersionedList of the pipeline + * with version taken earlier. This version must be passed as a parameter here. + * The flattening happens only if versions match. + */ + public void flattenOneSegment(long requesterVersion) { + pipeline.flattenYoungestSegment(requesterVersion); + } + + public boolean hasImmutableSegments() { return !pipeline.isEmpty(); } - public VersionedSegmentsList getCompactibleSegments() { + public VersionedSegmentsList getImmutableSegments() { return pipeline.getVersionedList(); } @@ -243,8 +257,7 @@ public class CompactingMemStore extends AbstractMemStore { order--; } list.add(getSnapshot().getSegmentScanner(readPt, order)); - return Collections. singletonList( - new MemStoreScanner((AbstractMemStore) this, list, readPt)); + return Collections. singletonList(new MemStoreScanner(getComparator(), list)); } /** @@ -288,17 +301,20 @@ public class CompactingMemStore extends AbstractMemStore { } // Phase II: Compact the pipeline try { - if (allowCompaction.get() && inMemoryFlushInProgress.compareAndSet(false, true)) { + if (allowCompaction.get()) { // setting the inMemoryFlushInProgress flag again for the case this method is invoked // directly (only in tests) in the common path setting from true to true is idempotent + inMemoryFlushInProgress.set(true); // Speculative compaction execution, may be interrupted if flush is forced while // compaction is in progress - compactor.startCompaction(); + compactor.start(); } } catch (IOException e) { LOG.warn("Unable to run memstore compaction. 
region " + getRegionServices().getRegionInfo().getRegionNameAsString() + "store: "+ getFamilyName(), e); + } finally { + stopCompaction(); } } @@ -311,9 +327,10 @@ public class CompactingMemStore extends AbstractMemStore { } private boolean shouldFlushInMemory() { - if(getActive().getSize() > inmemoryFlushSize) { - // size above flush threshold - return (allowCompaction.get() && !inMemoryFlushInProgress.get()); + if(getActive().getSize() > inmemoryFlushSize) { // size above flush threshold + // the inMemoryFlushInProgress is CASed to be true here in order to mutual exclude + // the insert of the active into the compaction pipeline + return (inMemoryFlushInProgress.compareAndSet(false,true)); } return false; } @@ -325,14 +342,14 @@ public class CompactingMemStore extends AbstractMemStore { */ private void stopCompaction() { if (inMemoryFlushInProgress.get()) { - compactor.stopCompact(); + compactor.stop(); inMemoryFlushInProgress.set(false); } } private void pushActiveToPipeline(MutableSegment active) { if (!active.isEmpty()) { - long delta = DEEP_OVERHEAD_PER_PIPELINE_ITEM - DEEP_OVERHEAD; + long delta = DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM - DEEP_OVERHEAD; active.setSize(active.getSize() + delta); pipeline.pushHead(active); resetCellSet(); @@ -403,4 +420,14 @@ public class CompactingMemStore extends AbstractMemStore { } return lowest; } + + // debug method + public void debug() { + String msg = "active size="+getActive().getSize(); + msg += " threshold="+IN_MEMORY_FLUSH_THRESHOLD_FACTOR_DEFAULT* inmemoryFlushSize; + msg += " allow compaction is "+ (allowCompaction.get() ? "true" : "false"); + msg += " inMemoryFlushInProgress is "+ (inMemoryFlushInProgress.get() ? "true" : "false"); + LOG.debug(msg); + } + } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java index 3ecd11c1..4d5bcb4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java @@ -45,7 +45,7 @@ public class CompactionPipeline { private static final ImmutableSegment EMPTY_MEM_STORE_SEGMENT = SegmentFactory.instance() .createImmutableSegment(null, - CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM); + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM); public CompactionPipeline(RegionServicesForStores region) { this.region = region; @@ -117,6 +117,52 @@ public class CompactionPipeline { return true; } + /** + * If the caller holds the current version, go over the the pipeline and try to flatten each + * segment. Flattening is replacing the ConcurrentSkipListMap based CellSet to CellArrayMap based. + * Flattening of the segment that initially is not based on ConcurrentSkipListMap has no effect. + * Return after one segment is successfully flatten. + * + * @return true iff a segment was successfully flattened + */ + public boolean flattenYoungestSegment(long requesterVersion) { + + if(requesterVersion != version) { + LOG.warn("Segment flattening failed, because versions do not match. 
Requester version: " + + requesterVersion + ", actual version: " + version); + return false; + } + + synchronized (pipeline){ + if(requesterVersion != version) { + LOG.info("Segment flattening failed, because versions do not match"); + return false; + } + + long sizeBeforeFlat = 0; + long globalMemstoreSize = 0; + + for (ImmutableSegment s : pipeline) { + // remember the old size in case this segment is going to be flatten + sizeBeforeFlat = s.getInternalSize(); + if (s.flatten()) { + if(region != null) { + long sizeAfterFlat = s.getInternalSize(); + long delta = sizeBeforeFlat - sizeAfterFlat; + globalMemstoreSize = region.addAndGetGlobalMemstoreSize(-delta); + } + LOG.info("Compaction pipeline segment " + s + " was flattened; globalMemstoreSize: " + + globalMemstoreSize); + return true; + } + } + + } + // do not update the global memstore size counter and do not increase the version, + // because all the cells remain in place + return false; + } + public boolean isEmpty() { return pipeline.isEmpty(); } @@ -170,7 +216,6 @@ public class CompactionPipeline { // empty suffix is always valid return true; } - Iterator pipelineBackwardIterator = pipeline.descendingIterator(); Iterator suffixBackwardIterator = suffix.descendingIterator(); ImmutableSegment suffixCurrent; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java index c21dbb5..2e1eefb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java @@ -119,7 +119,7 @@ public class DefaultMemStore extends AbstractMemStore { list.add(getActive().getSegmentScanner(readPt, 1)); list.add(getSnapshot().getSegmentScanner(readPt, 0)); return Collections. 
singletonList( - new MemStoreScanner((AbstractMemStore) this, list, readPt)); + new MemStoreScanner(getComparator(), list)); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java index 13d9fbf..5143aae 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java @@ -18,11 +18,23 @@ */ package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.util.CollectionBackedScanner; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.SimpleMutableByteRange; +import org.apache.hadoop.hbase.util.ByteRange; +import org.apache.hadoop.hbase.util.Bytes; + +import java.io.IOException; /** * ImmutableSegment is an abstract class that extends the API supported by a {@link Segment}, @@ -38,12 +50,83 @@ public class ImmutableSegment extends Segment { */ private final TimeRange timeRange; + /** + * Types of ImmutableSegment + */ + public enum Type { + SKIPLIST_MAP_BASED, + ARRAY_MAP_BASED, + } + + private Type type = Type.SKIPLIST_MAP_BASED; + + // whether it is based on CellFlatMap or ConcurrentSkipListMap + private boolean isFlat(){ + return (type == Type.ARRAY_MAP_BASED); + } + + ///////////////////// CONSTRUCTORS ///////////////////// + /**------------------------------------------------------------------------ + * Copy C-tor to be used when new ImmutableSegment is being built from a Mutable one. + * This C-tor should be used when active MutableSegment is pushed into the compaction + * pipeline and becomes an ImmutableSegment. + */ protected ImmutableSegment(Segment segment) { super(segment); + type = Type.SKIPLIST_MAP_BASED; TimeRangeTracker trt = getTimeRangeTracker(); this.timeRange = trt == null? null: trt.toTimeRange(); } + /**------------------------------------------------------------------------ + * C-tor to be used when new ImmutableSegment is a result of compaction of a list + * of older ImmutableSegments. + * The given iterator returns the Cells that "survived" the compaction. + * According to the boolean parameter "array" the new ImmutableSegment is built based on + * CellArrayMap or CellChunkMap. + */ + protected ImmutableSegment( + final Configuration conf, CellComparator comparator, MemStoreCompactorIterator iterator, + MemStoreLAB memStoreLAB, int numOfCells, Type type) { + + super(null, comparator, memStoreLAB, + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_CELL_ARRAY_ITEM, + ClassSize.CELL_ARRAY_MAP_ENTRY); + + CellSet cs = null; // build the CellSet Cell array or Byte array based + cs = createCellArrayMapSet(numOfCells, iterator); + + this.setCellSet(null, cs); // update the CellSet of the new Segment + this.type = type; + TimeRangeTracker trt = getTimeRangeTracker(); + this.timeRange = trt == null? 
null: trt.toTimeRange(); + } + + /**------------------------------------------------------------------------ + * C-tor to be used when new SKIP-LIST BASED ImmutableSegment is a result of compaction of a + * list of older ImmutableSegments. + * The given iterator returns the Cells that "survived" the compaction. + */ + protected ImmutableSegment( + CellComparator comparator, MemStoreCompactorIterator iterator, MemStoreLAB memStoreLAB) { + + super(new CellSet(comparator), comparator, memStoreLAB, + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM, ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY); + + while (iterator.hasNext()) { + Cell c = iterator.next(); + // The scanner is doing all the elimination logic + // now we just copy it to the new segment + KeyValue kv = KeyValueUtil.ensureKeyValue(c); + Cell newKV = maybeCloneWithAllocator(kv); + internalAdd(newKV); + } + type = Type.SKIPLIST_MAP_BASED; + TimeRangeTracker trt = getTimeRangeTracker(); + this.timeRange = trt == null? null: trt.toTimeRange(); + } + + ///////////////////// PUBLIC METHODS ///////////////////// /** * Builds a special scanner for the MemStoreSnapshot object that is different than the * general segment scanner. @@ -64,4 +147,91 @@ public class ImmutableSegment extends Segment { return this.timeRange.getMin(); } -} \ No newline at end of file + @Override + public long getInternalSize() { + switch (type){ + case SKIPLIST_MAP_BASED: + return size.get() - CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM; + case ARRAY_MAP_BASED: + return size.get() - CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_CELL_ARRAY_ITEM; + default: throw new IllegalStateException(); + } + } + + /**------------------------------------------------------------------------ + * Change the CellSet of this ImmutableSegment from one based on ConcurrentSkipListMap to one + * based on CellArrayMap. + * If this ImmutableSegment is not based on ConcurrentSkipListMap , this is NOP + * For now the change from ConcurrentSkipListMap to CellChunkMap is not supported, because + * this requires the Cell to know on which Chunk it is placed. + * + * Synchronization of the CellSet replacement: + * The reference to the CellSet is AtomicReference and is updated only when ImmutableSegment + * is constructed (single thread) or flattened. The flattening happens as part of a single + * thread of compaction, but to be on the safe side the initial CellSet is locally saved + * before the flattening and then replaced using CAS instruction. 
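+ *
+ * Schematically, the swap performed below (a sketch mirroring the flatten() code,
+ * not a normative API):
+ * <pre>
+ *   CellSet oldCellSet = getCellSet();                         // saved before flattening
+ *   CellSet newCellSet = recreateCellArrayMapSet(numOfCells);  // CellArrayMap-based copy of the index
+ *   setCellSet(oldCellSet, newCellSet);                        // CAS-style reference swap
+ * </pre>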
+ */ + public boolean flatten() { + if (isFlat()) return false; + CellSet oldCellSet = getCellSet(); + int numOfCells = getCellsCount(); + + // each Cell is now represented in CellArrayMap + constantCellMetaDataSize = ClassSize.CELL_ARRAY_MAP_ENTRY; + + // arrange the meta-data size, decrease all meta-data sizes related to SkipList + incSize( + -(ClassSize.CONCURRENT_SKIPLISTMAP + numOfCells * ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY)); + // add size of CellArrayMap and meta-data overhead per Cell + incSize(ClassSize.CELL_ARRAY_MAP + numOfCells * constantCellMetaDataSize); + + CellSet newCellSet = recreateCellArrayMapSet(numOfCells); // build the CellArrayMap-based CellSet + setCellSet(oldCellSet,newCellSet); + return true; + } + + ///////////////////// PRIVATE METHODS ///////////////////// + /*------------------------------------------------------------------------*/ + // Create CellSet based on CellArrayMap from compacting iterator + private CellSet createCellArrayMapSet(int numOfCells, MemStoreCompactorIterator iterator) { + + Cell[] cells = new Cell[numOfCells]; // build the Cell Array + int i = 0; + while (iterator.hasNext()) { + Cell c = iterator.next(); + // The scanner behind the iterator is doing all the elimination logic + // now we just copy it to the new segment (also MSLAB copy) + KeyValue kv = KeyValueUtil.ensureKeyValue(c); + cells[i++] = maybeCloneWithAllocator(kv); + // the last parameter is true, because in compaction we count both Heap (Data) and MetaData size + updateMetaInfo(c,true); + } + // build the immutable CellSet + CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, i, false); + return new CellSet(cam); + } + + /*------------------------------------------------------------------------*/ + // Create CellSet based on CellArrayMap from current ConcurrentSkipListMap based CellSet + // (without compacting iterator) + private CellSet recreateCellArrayMapSet(int numOfCells) { + + Cell[] cells = new Cell[numOfCells]; // build the Cell Array + Cell curCell; + int idx = 0; + // create this segment scanner with maximal possible read point, to go over all Cells + SegmentScanner segmentScanner = this.getSegmentScanner(Long.MAX_VALUE); + + try { + while ((curCell = segmentScanner.next()) != null) { + cells[idx++] = curCell; + } + } catch (IOException ie) { + throw new IllegalStateException(ie); + } + // build the immutable CellSet + CellArrayMap cam = new CellArrayMap(getComparator(), cells, 0, idx, false); + return new CellSet(cam); + } + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java index a363e95..9b279e7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java @@ -32,8 +32,11 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; /** - * The ongoing MemStore Compaction manager, dispatches a solo running compaction - * and interrupts the compaction if requested. + * The ongoing MemStore compaction manager; it dispatches a solo running compaction and interrupts + * the compaction if requested. The compaction is interrupted and stopped by CompactingMemStore, + * for example when another compaction needs to be started. + * Prior to compaction the MemStoreCompactor evaluates + * the compacting ratio and aborts the compaction if it is not worthy. 
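+ * Schematically, the worthiness check in doCompaction() is (names are illustrative):
+ * <pre>
+ *   // if too many cells would survive the compaction, only flatten the youngest segment
+ *   if (survivingCells > COMPACTION_THRESHOLD_REMAIN_FRACTION * cellsBeforeCompaction) {
+ *     compactingMemStore.flattenOneSegment(versionedList.getVersion());
+ *   }
+ * </pre>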
* The MemStoreScanner is used to traverse the compaction pipeline. The MemStoreScanner * is included in internal store scanner, where all compaction logic is implemented. * Threads safety: It is assumed that the compaction pipeline is immutable, @@ -42,47 +45,69 @@ import java.util.concurrent.atomic.AtomicBoolean; @InterfaceAudience.Private class MemStoreCompactor { + // Possibility for external guidance whether to flatten the segments without compaction + static final String MEMSTORE_COMPACTOR_FLATTENING = "hbase.hregion.compacting.memstore.flatten"; + static final boolean MEMSTORE_COMPACTOR_FLATTENING_DEFAULT = true; + + // Possibility for external setting of the compacted structure (SkipList, CellArray, etc.) + static final String COMPACTING_MEMSTORE_TYPE_KEY = "hbase.hregion.compacting.memstore.type"; + static final int COMPACTING_MEMSTORE_TYPE_DEFAULT = 1; + + public final static double COMPACTION_THRESHOLD_REMAIN_FRACTION = 0.8; + private static final Log LOG = LogFactory.getLog(MemStoreCompactor.class); private CompactingMemStore compactingMemStore; - private MemStoreScanner scanner; // scanner for pipeline only - // scanner on top of MemStoreScanner that uses ScanQueryMatcher - private StoreScanner compactingScanner; - - // smallest read point for any ongoing MemStore scan - private long smallestReadPoint; // a static version of the segment list from the pipeline private VersionedSegmentsList versionedList; + + // a flag raised when compaction is requested to stop private final AtomicBoolean isInterrupted = new AtomicBoolean(false); + // the limit to the size of the groups to be later provided to MemStoreCompactorIterator + private final int compactionKVMax; + + /** + * Types of Compaction + */ + public enum Type { + COMPACT_TO_SKIPLIST_MAP, + COMPACT_TO_ARRAY_MAP + } + + private Type type = Type.COMPACT_TO_SKIPLIST_MAP; + public MemStoreCompactor(CompactingMemStore compactingMemStore) { this.compactingMemStore = compactingMemStore; + this.compactionKVMax = compactingMemStore.getConfiguration().getInt( + HConstants.COMPACTION_KV_MAX, HConstants.COMPACTION_KV_MAX_DEFAULT); + } - /** + /**---------------------------------------------------------------------- * The request to dispatch the compaction asynchronous task. * The method returns true if compaction was successfully dispatched, or false if there - * is already an ongoing compaction or nothing to compact. + * is already an ongoing compaction or no segments to compact. 
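+ *
+ * An illustrative call pattern, as driven by CompactingMemStore (simplified sketch):
+ * <pre>
+ *   if (compactor.start()) {  // pins the current pipeline version and dispatches
+ *     // doCompaction() proceeds asynchronously
+ *   }
+ *   compactor.stop();         // non-blocking cancellation request; may arrive too late
+ * </pre>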
*/ - public boolean startCompaction() throws IOException { - if (!compactingMemStore.hasCompactibleSegments()) return false; // no compaction on empty - - List scanners = new ArrayList(); - // get the list of segments from the pipeline - versionedList = compactingMemStore.getCompactibleSegments(); - // the list is marked with specific version - - // create the list of scanners with maximally possible read point, meaning that - // all KVs are going to be returned by the pipeline traversing - for (Segment segment : versionedList.getStoreSegments()) { - scanners.add(segment.getSegmentScanner(Long.MAX_VALUE)); + public boolean start() throws IOException { + if (!compactingMemStore.hasImmutableSegments()) return false; // no compaction on empty + + int t = compactingMemStore.getConfiguration().getInt(COMPACTING_MEMSTORE_TYPE_KEY, + COMPACTING_MEMSTORE_TYPE_DEFAULT); + switch (t) { + case 1: type = Type.COMPACT_TO_SKIPLIST_MAP; + LOG.trace("Creating CompactingMemStore that is going to compact to SkipList data structure " + + " for store: " + compactingMemStore.getFamilyName()); + break; + case 2: type = Type.COMPACT_TO_ARRAY_MAP; + LOG.trace("Creating CompactingMemStore that is going to compact to CellArray data structure " + + " for store: " + compactingMemStore.getFamilyName()); + break; } - scanner = - new MemStoreScanner(compactingMemStore, scanners, Long.MAX_VALUE, - MemStoreScanner.Type.COMPACT_FORWARD); - smallestReadPoint = compactingMemStore.getSmallestReadPoint(); - compactingScanner = createScanner(compactingMemStore.getStore()); + // get a snapshot of the list of the segments from the pipeline, + // this local copy of the list is marked with specific version + versionedList = compactingMemStore.getImmutableSegments(); if (LOG.isDebugEnabled()) { LOG.debug("Starting the MemStore in-memory compaction for store " @@ -93,109 +118,131 @@ class MemStoreCompactor { return true; } - /** + /**---------------------------------------------------------------------- * The request to cancel the compaction asynchronous task * The compaction may still happen if the request was sent too late * Non-blocking request */ - public void stopCompact() { + public void stop() { isInterrupted.compareAndSet(false, true); } - /** + /**---------------------------------------------------------------------- * Close the scanners and clear the pointers in order to allow good * garbage collection */ private void releaseResources() { isInterrupted.set(false); - scanner.close(); - scanner = null; - compactingScanner.close(); - compactingScanner = null; versionedList = null; } - /** + /**---------------------------------------------------------------------- * The worker thread performs the compaction asynchronously. * The solo (per compactor) thread only reads the compaction pipeline. * There is at most one thread per memstore instance. 
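+ * The flow below is staged: Phase I estimates the compaction expedience and, if too few
+ * cells would be eliminated, falls back to flattening the youngest segment; Phase II
+ * copy-compacts the surviving cells into a new ImmutableSegment; Phase III swaps the
+ * compacted segment into the pipeline, provided the pipeline version still matches.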
*/ private void doCompaction() { + ImmutableSegment result = null; + boolean resultSwapped = false; + int immutCellsNum = versionedList.getNumOfCells(); // number of immutable cells + boolean toFlatten = // the option to flatten or not to flatten + compactingMemStore.getConfiguration().getBoolean(MEMSTORE_COMPACTOR_FLATTENING, + MEMSTORE_COMPACTOR_FLATTENING_DEFAULT); - ImmutableSegment result = SegmentFactory.instance() // create the scanner - .createImmutableSegment( - compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), - CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM); - - // the compaction processing try { - // Phase I: create the compacted MutableCellSetSegment - compactSegments(result); + // PHASE I: estimate the compaction expedience - EVALUATE COMPACTION + if (toFlatten) { + immutCellsNum = countCellsForCompaction(); + + if ( !isInterrupted.get() && + (immutCellsNum + > COMPACTION_THRESHOLD_REMAIN_FRACTION * versionedList.getNumOfCells())) { + // too much cells "survive" the possible compaction, we do not want to compact! + LOG.debug("In-Memory compaction does not pay off - storing the flattened segment" + + " for store: " + compactingMemStore.getFamilyName()); + // Looking for Segment in the pipeline with SkipList index, to make it flat + compactingMemStore.flattenOneSegment(versionedList.getVersion()); + return; + } + } + + // PHASE II: create the new compacted ImmutableSegment - START COPY-COMPACTION + if (!isInterrupted.get()) { + result = compact(immutCellsNum); + } - // Phase II: swap the old compaction pipeline + // Phase III: swap the old compaction pipeline - END COPY-COMPACTION if (!isInterrupted.get()) { - compactingMemStore.swapCompactedSegments(versionedList, result); + resultSwapped = compactingMemStore.swapCompactedSegments(versionedList, result); // update the wal so it can be truncated and not get too long compactingMemStore.updateLowestUnflushedSequenceIdInWAL(true); // only if greater } } catch (Exception e) { - LOG.debug("Interrupting the MemStore in-memory compaction for store " + compactingMemStore - .getFamilyName()); + LOG.debug("Interrupting the MemStore in-memory compaction for store " + + compactingMemStore.getFamilyName()); Thread.currentThread().interrupt(); - return; } finally { + if ((result != null) && (!resultSwapped)) result.close(); releaseResources(); - compactingMemStore.setInMemoryFlushInProgress(false); } } - /** - * Creates the scanner for compacting the pipeline. - * - * @return the scanner + /**---------------------------------------------------------------------- + * The copy-compaction is the creation of the ImmutableSegment (from the relevant type) + * based on the Compactor Iterator. The new ImmutableSegment is returned. */ - private StoreScanner createScanner(Store store) throws IOException { + private ImmutableSegment compact(int numOfCells) + throws IOException { - Scan scan = new Scan(); - scan.setMaxVersions(); //Get all available versions + LOG.info("Starting in-memory compaction of type: " + type + ". 
Before compaction we have " + + numOfCells + " cells in the entire compaction pipeline"); - StoreScanner internalScanner = - new StoreScanner(store, store.getScanInfo(), scan, Collections.singletonList(scanner), - ScanType.COMPACT_RETAIN_DELETES, smallestReadPoint, HConstants.OLDEST_TIMESTAMP); + ImmutableSegment result = null; + MemStoreCompactorIterator iterator = + new MemStoreCompactorIterator(versionedList.getStoreSegments(), + compactingMemStore.getComparator(), + compactionKVMax, compactingMemStore.getStore()); + try { + switch (type) { + case COMPACT_TO_SKIPLIST_MAP: + result = SegmentFactory.instance().createImmutableSegment( + compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), iterator); + break; + case COMPACT_TO_ARRAY_MAP: + result = SegmentFactory.instance().createImmutableSegment( + compactingMemStore.getConfiguration(), compactingMemStore.getComparator(), iterator, + numOfCells, ImmutableSegment.Type.ARRAY_MAP_BASED); + break; + default: throw new RuntimeException("Unknown type " + type); // sanity check + } + } finally { + iterator.close(); + } - return internalScanner; + return result; } - /** - * Updates the given single Segment using the internal store scanner, - * who in turn uses ScanQueryMatcher + /**---------------------------------------------------------------------- + * Count cells to estimate the efficiency of the future compaction */ - private void compactSegments(Segment result) throws IOException { - - List kvs = new ArrayList(); - // get the limit to the size of the groups to be returned by compactingScanner - int compactionKVMax = compactingMemStore.getConfiguration().getInt( - HConstants.COMPACTION_KV_MAX, - HConstants.COMPACTION_KV_MAX_DEFAULT); - - ScannerContext scannerContext = - ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); - - boolean hasMore; - do { - hasMore = compactingScanner.next(kvs, scannerContext); - if (!kvs.isEmpty()) { - for (Cell c : kvs) { - // The scanner is doing all the elimination logic - // now we just copy it to the new segment - Cell newKV = result.maybeCloneWithAllocator(c); - result.internalAdd(newKV); + private int countCellsForCompaction() throws IOException { - } - kvs.clear(); + int cnt = 0; + MemStoreCompactorIterator iterator = + new MemStoreCompactorIterator( + versionedList.getStoreSegments(), compactingMemStore.getComparator(), + compactionKVMax, compactingMemStore.getStore()); + + try { + while (iterator.next() != null) { + cnt++; } - } while (hasMore && (!isInterrupted.get())); + } finally { + iterator.close(); + } + + return cnt; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorIterator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorIterator.java new file mode 100644 index 0000000..029cbf9 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactorIterator.java @@ -0,0 +1,154 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.client.Scan; + +import java.io.IOException; +import java.util.*; + +/** + * The MemStoreCompactorIterator is designed to perform one iteration over given list of segments + * For another iteration new instance of MemStoreCompactorIterator needs to be created + * The iterator is not thread-safe and must have only one instance in each period of time + */ +@InterfaceAudience.Private +public class MemStoreCompactorIterator implements Iterator { + + private List kvs = new ArrayList(); + + // scanner for full or partial pipeline (heap of segment scanners) + // we need to keep those scanners in order to close them at the end + private KeyValueScanner scanner; + + // scanner on top of pipeline scanner that uses ScanQueryMatcher + private StoreScanner compactingScanner; + + private final ScannerContext scannerContext; + + private boolean hasMore; + private Iterator kvsIterator; + + // C-tor + public MemStoreCompactorIterator(LinkedList segments, + CellComparator comparator, int compactionKVMax, Store store) throws IOException { + + this.scannerContext = ScannerContext.newBuilder().setBatchLimit(compactionKVMax).build(); + + // list of Scanners of segments in the pipeline, when compaction starts + List scanners = new ArrayList(); + + // create the list of scanners with maximally possible read point, meaning that + // all KVs are going to be returned by the pipeline traversing + for (Segment segment : segments) { + scanners.add(segment.getSegmentScanner(store.getSmallestReadPoint())); + } + + scanner = new MemStoreScanner(comparator, scanners, MemStoreScanner.Type.COMPACT_FORWARD); + + // reinitialize the compacting scanner for each instance of iterator + compactingScanner = createScanner(store, scanner); + + hasMore = compactingScanner.next(kvs, scannerContext); + + if (!kvs.isEmpty()) { + kvsIterator = kvs.iterator(); + } + + } + + @Override + public boolean hasNext() { + if (!kvsIterator.hasNext()) { + // refillKVS() method should be invoked only if !kvsIterator.hasNext() + if (!refillKVS()) { + return false; + } + } + return hasMore; + } + + @Override + public Cell next() { + if (!kvsIterator.hasNext()) { + // refillKVS() method should be invoked only if !kvsIterator.hasNext() + if (!refillKVS()) return null; + } + return (!hasMore) ? null : kvsIterator.next(); + } + + public void close() { + compactingScanner.close(); + compactingScanner = null; + scanner.close(); + scanner = null; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + /** + * Creates the scanner for compacting the pipeline. 
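+ * The returned StoreScanner runs with ScanType.COMPACT_RETAIN_DELETES, so delete markers
+ * are retained while expired and superfluous cell versions are filtered out by the
+ * ScanQueryMatcher machinery.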
+ * + * @return the scanner + */ + private StoreScanner createScanner(Store store, KeyValueScanner scanner) + throws IOException { + + Scan scan = new Scan(); + scan.setMaxVersions(); //Get all available versions + StoreScanner internalScanner = + new StoreScanner(store, store.getScanInfo(), scan, Collections.singletonList(scanner), + ScanType.COMPACT_RETAIN_DELETES, store.getSmallestReadPoint(), + HConstants.OLDEST_TIMESTAMP); + + return internalScanner; + } + + + + private boolean refillKVS() { + kvs.clear(); // clear previous KVS, first initiated in the constructor + if (!hasMore) { // if there is nothing expected next in compactingScanner + return false; + } + + try { // try to get next KVS + hasMore = compactingScanner.next(kvs, scannerContext); + } catch (IOException ie) { + throw new IllegalStateException(ie); + } + + if (!kvs.isEmpty() ) { // is the new KVS empty ? + kvsIterator = kvs.iterator(); + return true; + } + return false; + } + + +} + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java index 3d31d2a..74d061c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.List; import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; @@ -30,7 +31,7 @@ import org.apache.htrace.Trace; /** * This is the scanner for any MemStore implementation, derived from MemStore. - * The MemStoreScanner combines SegmentScanner from different Segments and + * The MemStoreScanner combines KeyValueScanner from different Segments and * uses the key-value heap and the reversed key-value heap for the aggregated key-values set. * It is assumed that only traversing forward or backward is used (without zigzagging in between) */ @@ -55,61 +56,50 @@ public class MemStoreScanner extends NonLazyKeyValueScanner { // or according to the first usage private Type type = Type.UNDEFINED; - private long readPoint; // remember the initial version of the scanners list List scanners; - // pointer back to the relevant MemStore - // is needed for shouldSeek() method - private AbstractMemStore backwardReferenceToMemStore; - /** - * If UNDEFINED type for MemStoreScanner is provided, the forward heap is used as default! - * After constructor only one heap is going to be initialized for entire lifespan - * of the MemStoreScanner. A specific scanner can only be one directional! - * - * @param ms Pointer back to the MemStore - * @param scanners List of scanners over the segments - * @param readPt Read point below which we can safely remove duplicate KVs - */ - public MemStoreScanner(AbstractMemStore ms, List scanners, long readPt) - throws IOException { - this(ms, scanners, readPt, Type.UNDEFINED); - } + private final CellComparator comparator; /** * If UNDEFINED type for MemStoreScanner is provided, the forward heap is used as default! * After constructor only one heap is going to be initialized for entire lifespan * of the MemStoreScanner. A specific scanner can only be one directional! 
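+ *
+ * For example (illustrative only), building a forward scanner for compaction:
+ * <pre>
+ *   List scanners = ...;  // one KeyValueScanner per Segment in the pipeline
+ *   MemStoreScanner scanner =
+ *       new MemStoreScanner(comparator, scanners, MemStoreScanner.Type.COMPACT_FORWARD);
+ * </pre>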
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
index 3d31d2a..74d061c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreScanner.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
@@ -30,7 +31,7 @@ import org.apache.htrace.Trace;
 
 /**
  * This is the scanner for any MemStore implementation, derived from MemStore.
- * The MemStoreScanner combines SegmentScanner from different Segments and
+ * The MemStoreScanner combines KeyValueScanners from different Segments and
  * uses the key-value heap and the reversed key-value heap for the aggregated key-values set.
  * It is assumed that only traversing forward or backward is used (without zigzagging in between)
  */
@@ -55,61 +56,50 @@ public class MemStoreScanner extends NonLazyKeyValueScanner {
   // or according to the first usage
   private Type type = Type.UNDEFINED;
 
-  private long readPoint;
-
   // remember the initial version of the scanners list
   List<KeyValueScanner> scanners;
 
-  // pointer back to the relevant MemStore
-  // is needed for shouldSeek() method
-  private AbstractMemStore backwardReferenceToMemStore;
-
-  /**
-   * If UNDEFINED type for MemStoreScanner is provided, the forward heap is used as default!
-   * After constructor only one heap is going to be initialized for entire lifespan
-   * of the MemStoreScanner. A specific scanner can only be one directional!
-   *
-   * @param ms       Pointer back to the MemStore
-   * @param scanners List of scanners over the segments
-   * @param readPt   Read point below which we can safely remove duplicate KVs
-   */
-  public MemStoreScanner(AbstractMemStore ms, List<KeyValueScanner> scanners, long readPt)
-      throws IOException {
-    this(ms, scanners, readPt, Type.UNDEFINED);
-  }
+  private final CellComparator comparator;
 
   /**
    * If UNDEFINED type for MemStoreScanner is provided, the forward heap is used as default!
    * After constructor only one heap is going to be initialized for entire lifespan
    * of the MemStoreScanner. A specific scanner can only be one directional!
    *
-   * @param ms       Pointer back to the MemStore
-   * @param scanners List of scanners over the segments
-   * @param readPt   Read point below which we can safely remove duplicate KVs
-   * @param type     The scan type COMPACT_FORWARD should be used for compaction
+   * @param comparator Cell comparator
+   * @param scanners   List of scanners, from which the heap will be built
+   * @param type       The scan type; COMPACT_FORWARD should be used for compaction
    */
-  public MemStoreScanner(AbstractMemStore ms, List<KeyValueScanner> scanners, long readPt,
-      Type type) throws IOException {
+  public MemStoreScanner(CellComparator comparator, List<KeyValueScanner> scanners, Type type)
+      throws IOException {
     super();
-    this.readPoint = readPt;
     this.type = type;
     switch (type) {
-    case UNDEFINED:
-    case USER_SCAN_FORWARD:
-    case COMPACT_FORWARD:
-      this.forwardHeap = new KeyValueHeap(scanners, ms.getComparator());
-      break;
-    case USER_SCAN_BACKWARD:
-      this.backwardHeap = new ReversedKeyValueHeap(scanners, ms.getComparator());
-      break;
-    default:
-      throw new IllegalArgumentException("Unknown scanner type in MemStoreScanner");
+      case UNDEFINED:
+      case USER_SCAN_FORWARD:
+      case COMPACT_FORWARD:
+        this.forwardHeap = new KeyValueHeap(scanners, comparator);
+        break;
+      case USER_SCAN_BACKWARD:
+        this.backwardHeap = new ReversedKeyValueHeap(scanners, comparator);
+        break;
+      default:
+        throw new IllegalArgumentException("Unknown scanner type in MemStoreScanner");
     }
-    this.backwardReferenceToMemStore = ms;
+    this.comparator = comparator;
     this.scanners = scanners;
     if (Trace.isTracing() && Trace.currentSpan() != null) {
       Trace.currentSpan().addTimelineAnnotation("Creating MemStoreScanner");
     }
   }
 
+  /* Constructor used only when the scan usage is unknown
+     and needs to be defined according to the first move */
+  public MemStoreScanner(CellComparator comparator, List<KeyValueScanner> scanners)
+      throws IOException {
+    this(comparator, scanners, Type.UNDEFINED);
+  }
+
   /**
    * Returns the cell from the top-most scanner without advancing the iterator.
* The backward traversal is assumed, only if specified explicitly @@ -135,7 +125,7 @@ public class MemStoreScanner extends NonLazyKeyValueScanner { currentCell != null; currentCell = heap.next()) { - // all the logic of presenting cells is inside the internal SegmentScanners + // all the logic of presenting cells is inside the internal KeyValueScanners // located inside the heap return currentCell; @@ -297,7 +287,7 @@ public class MemStoreScanner extends NonLazyKeyValueScanner { res |= scan.seekToPreviousRow(cell); } this.backwardHeap = - new ReversedKeyValueHeap(scanners, backwardReferenceToMemStore.getComparator()); + new ReversedKeyValueHeap(scanners, comparator); return res; } @@ -327,7 +317,7 @@ public class MemStoreScanner extends NonLazyKeyValueScanner { } } this.backwardHeap = - new ReversedKeyValueHeap(scanners, backwardReferenceToMemStore.getComparator()); + new ReversedKeyValueHeap(scanners, comparator); type = Type.USER_SCAN_BACKWARD; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java index 6337657..3443229 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MutableSegment.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.client.Scan; /** @@ -30,7 +31,7 @@ import org.apache.hadoop.hbase.client.Scan; public class MutableSegment extends Segment { protected MutableSegment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, long size) { - super(cellSet, comparator, memStoreLAB, size); + super(cellSet, comparator, memStoreLAB, size, ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY); } /** @@ -48,7 +49,7 @@ public class MutableSegment extends Segment { public long rollback(Cell cell) { Cell found = getCellSet().get(cell); if (found != null && found.getSequenceId() == cell.getSequenceId()) { - long sz = AbstractMemStore.heapSizeChange(cell, true); + long sz = heapSizeChange(cell, true); getCellSet().remove(cell); incSize(-sz); return sz; @@ -78,15 +79,8 @@ public class MutableSegment extends Segment { } @Override - protected void updateMetaInfo(Cell toAdd, long s) { - getTimeRangeTracker().includeTimestamp(toAdd); - size.addAndGet(s); - // In no tags case this NoTagsKeyValue.getTagsLength() is a cheap call. - // When we use ACL CP or Visibility CP which deals with Tags during - // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not - // parse the byte[] to identify the tags length. 
- if(toAdd.getTagsLength() > 0) { - tagsPresent = true; - } + public long getInternalSize() { + return size.get() - CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM; } -} \ No newline at end of file + +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java index dd824c1..ddd1766 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java @@ -21,15 +21,19 @@ package org.apache.hadoop.hbase.regionserver; import java.util.Iterator; import java.util.SortedSet; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; +import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.ByteRange; +import org.apache.hadoop.hbase.util.ClassSize; /** * This is an abstraction of a segment maintained in a memstore, e.g., the active @@ -41,32 +45,37 @@ import org.apache.hadoop.hbase.util.ByteRange; @InterfaceAudience.Private public abstract class Segment { - private volatile CellSet cellSet; + private static final Log LOG = LogFactory.getLog(Segment.class); + private AtomicReference cellSet= new AtomicReference(); private final CellComparator comparator; private long minSequenceId; private volatile MemStoreLAB memStoreLAB; protected final AtomicLong size; protected volatile boolean tagsPresent; private final TimeRangeTracker timeRangeTracker; + protected long constantCellMetaDataSize; - protected Segment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, - long size) { - this.cellSet = cellSet; + protected Segment( + CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB, long size, + long constantCellSize) { + this.cellSet.set(cellSet); this.comparator = comparator; this.minSequenceId = Long.MAX_VALUE; this.memStoreLAB = memStoreLAB; this.size = new AtomicLong(size); this.tagsPresent = false; + this.constantCellMetaDataSize = constantCellSize; this.timeRangeTracker = new TimeRangeTracker(); } protected Segment(Segment segment) { - this.cellSet = segment.getCellSet(); + this.cellSet.set(segment.getCellSet()); this.comparator = segment.getComparator(); this.minSequenceId = segment.getMinSequenceId(); this.memStoreLAB = segment.getMemStoreLAB(); this.size = new AtomicLong(segment.getSize()); this.tagsPresent = segment.isTagsPresent(); + this.constantCellMetaDataSize = segment.getConstantCellMetaDataSize(); this.timeRangeTracker = segment.getTimeRangeTracker(); } @@ -181,6 +190,19 @@ public abstract class Segment { } /** + * Setting the CellSet of the segment - used only for flat immutable segment for setting + * immutable CellSet after its creation in immutable segment constructor + * @return this object + */ + + protected Segment setCellSet(CellSet cellSetOld, CellSet cellSetNew) { + this.cellSet.compareAndSet(cellSetOld, cellSetNew); + return this; + } + + public abstract long getInternalSize(); + + /** * Returns the heap size of the segment * @return the heap size of the segment */ @@ -229,7 +251,7 @@ public abstract class 
Segment { * @return a set of all cells in the segment */ protected CellSet getCellSet() { - return cellSet; + return cellSet.get(); } /** @@ -242,22 +264,23 @@ public abstract class Segment { protected long internalAdd(Cell cell) { boolean succ = getCellSet().add(cell); - long s = AbstractMemStore.heapSizeChange(cell, succ); - updateMetaInfo(cell, s); + long s = updateMetaInfo(cell, succ); return s; } - protected void updateMetaInfo(Cell toAdd, long s) { - getTimeRangeTracker().includeTimestamp(toAdd); + protected long updateMetaInfo(Cell cellToAdd, boolean succ) { + long s = heapSizeChange(cellToAdd, succ); + getTimeRangeTracker().includeTimestamp( cellToAdd); size.addAndGet(s); - minSequenceId = Math.min(minSequenceId, toAdd.getSequenceId()); + minSequenceId = Math.min(minSequenceId, cellToAdd.getSequenceId()); // In no tags case this NoTagsKeyValue.getTagsLength() is a cheap call. // When we use ACL CP or Visibility CP which deals with Tags during // mutation, the TagRewriteCell.getTagsLength() is a cheaper call. We do not // parse the byte[] to identify the tags length. - if(toAdd.getTagsLength() > 0) { + if( cellToAdd.getTagsLength() > 0) { tagsPresent = true; } + return s; } /** @@ -269,7 +292,7 @@ public abstract class Segment { return getCellSet().tailSet(firstCell); } - private MemStoreLAB getMemStoreLAB() { + protected MemStoreLAB getMemStoreLAB() { return memStoreLAB; } @@ -283,6 +306,22 @@ public abstract class Segment { } } + /* + * Calculate how the MemStore size has changed. Includes overhead of the + * backing Map. + * @param cell + * @param notPresent True if the cell was NOT present in the set. + * @return change in size + */ + protected long heapSizeChange(final Cell cell, final boolean notPresent){ + return + notPresent ? + ClassSize.align(constantCellMetaDataSize + CellUtil.estimatedHeapSizeOf(cell)) : 0; + } + + public long getConstantCellMetaDataSize() { + return this.constantCellMetaDataSize; + } @Override public String toString() { String res = "Store segment of type "+this.getClass().getName()+"; "; @@ -292,4 +331,4 @@ public abstract class Segment { res += "Min ts "+getMinTimestamp()+"; "; return res; } -} \ No newline at end of file +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java index 7ac80ae..6f92361 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java @@ -18,11 +18,15 @@ */ package org.apache.hadoop.hbase.regionserver; +import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.ReflectionUtils; +import java.io.IOException; + /** * A singleton store segment factory. * Generate concrete store segments. 
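Before the factory changes below, a quick illustration of the per-cell accounting that Segment#heapSizeChange() above introduces. This is a sketch only, restating the patch's arithmetic with the constants it defines; the concrete values come from ClassSize estimates for the running JVM:

    // Illustrative only: how a segment charges a newly added cell.
    // constantCellMetaDataSize is the per-entry overhead of the backing map,
    // e.g. ClassSize.CONCURRENT_SKIPLISTMAP_ENTRY for a skip-list segment,
    // or ClassSize.CELL_ARRAY_MAP_ENTRY for a flat CellArrayMap segment.
    long delta = notPresent
        ? ClassSize.align(constantCellMetaDataSize + CellUtil.estimatedHeapSizeOf(cell))
        : 0;
    size.addAndGet(delta); // accumulated into the segment's AtomicLong size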
@@ -40,28 +44,44 @@ public final class SegmentFactory { return instance; } + // create skip-list-based (non-flat) immutable segment from compacting old immutable segments public ImmutableSegment createImmutableSegment(final Configuration conf, - final CellComparator comparator, long size) { + final CellComparator comparator, MemStoreCompactorIterator iterator) { MemStoreLAB memStoreLAB = getMemStoreLAB(conf); - MutableSegment segment = generateMutableSegment(conf, comparator, memStoreLAB, size); - return createImmutableSegment(segment); + return + new ImmutableSegment(comparator, iterator, memStoreLAB); } - public ImmutableSegment createImmutableSegment(CellComparator comparator, - long size) { + // create empty immutable segment + public ImmutableSegment createImmutableSegment(CellComparator comparator, long size) { MutableSegment segment = generateMutableSegment(null, comparator, null, size); return createImmutableSegment(segment); } + // create immutable segment from mutable public ImmutableSegment createImmutableSegment(MutableSegment segment) { return new ImmutableSegment(segment); } + + // create mutable segment public MutableSegment createMutableSegment(final Configuration conf, CellComparator comparator, long size) { MemStoreLAB memStoreLAB = getMemStoreLAB(conf); return generateMutableSegment(conf, comparator, memStoreLAB, size); } + // create new flat immutable segment from compacting old immutable segment + public ImmutableSegment createImmutableSegment(final Configuration conf, final CellComparator comparator, + MemStoreCompactorIterator iterator, int numOfCells, ImmutableSegment.Type segmentType) + throws IOException { + Preconditions.checkArgument( + segmentType != ImmutableSegment.Type.SKIPLIST_MAP_BASED, "wrong immutable segment type"); + MemStoreLAB memStoreLAB = getMemStoreLAB(conf); + return + new ImmutableSegment( + conf, comparator, iterator, memStoreLAB, numOfCells, segmentType); + } + //****** private methods to instantiate concrete store segments **********// private MutableSegment generateMutableSegment( diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java index 1191f30..8cf0a7c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentScanner.java @@ -305,10 +305,6 @@ public class SegmentScanner implements KeyValueScanner { // do nothing } - protected Segment getSegment(){ - return segment; - } - //debug method @Override public String toString() { @@ -320,6 +316,10 @@ public class SegmentScanner implements KeyValueScanner { /********************* Private Methods **********************/ + private Segment getSegment(){ + return segment; + } + /** * Private internal method for iterating over the segment, * skipping the cells with irrelevant MVCC diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java index 9d7a723..505ccf4 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java @@ -38,8 +38,7 @@ public class VersionedSegmentsList { private final LinkedList storeSegments; private final long version; - public VersionedSegmentsList( - LinkedList 
storeSegments, long version) { + public VersionedSegmentsList(LinkedList storeSegments, long version) { this.storeSegments = storeSegments; this.version = version; } @@ -51,4 +50,13 @@ public class VersionedSegmentsList { public long getVersion() { return version; } + + public int getNumOfCells() { + int totalCells = 0; + for (ImmutableSegment s : storeSegments) { + totalCells += s.getCellsCount(); + } + return totalCells; + } + } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java index 09e2271..e7d6661 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/TestHeapSize.java @@ -28,11 +28,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.hfile.BlockCacheKey; import org.apache.hadoop.hbase.io.hfile.LruBlockCache; import org.apache.hadoop.hbase.io.hfile.LruCachedBlock; -import org.apache.hadoop.hbase.regionserver.CellSet; -import org.apache.hadoop.hbase.regionserver.DefaultMemStore; -import org.apache.hadoop.hbase.regionserver.HRegion; -import org.apache.hadoop.hbase.regionserver.HStore; -import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; +import org.apache.hadoop.hbase.regionserver.*; import org.apache.hadoop.hbase.testclassification.IOTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.ClassSize; @@ -174,6 +170,15 @@ public class TestHeapSize { assertEquals(expected, actual); } + // CellArrayMap + cl = CellArrayMap.class; + expected = ClassSize.estimateBase(cl, false); + actual = ClassSize.CELL_ARRAY_MAP; + if(expected != actual) { + ClassSize.estimateBase(cl, true); + assertEquals(expected, actual); + } + // ReentrantReadWriteLock cl = ReentrantReadWriteLock.class; expected = ClassSize.estimateBase(cl, false); @@ -240,7 +245,7 @@ public class TestHeapSize { // CellSet cl = CellSet.class; expected = ClassSize.estimateBase(cl, false); - actual = ClassSize.CELL_SKIPLIST_SET; + actual = ClassSize.CELL_SET; if (expected != actual) { ClassSize.estimateBase(cl, true); assertEquals(expected, actual); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java new file mode 100644 index 0000000..cd5788e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCellFlatSet.java @@ -0,0 +1,143 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.experimental.categories.Category;
+
+import java.util.Iterator;
+import java.util.SortedSet;
+import static org.junit.Assert.assertTrue;
+
+@Category({RegionServerTests.class, SmallTests.class})
+public class TestCellFlatSet extends TestCase {
+
+  private static final int NUM_OF_CELLS = 4;
+
+  private Cell[] cells;
+  private CellArrayMap cbOnHeap;
+
+  private final static Configuration conf = new Configuration();
+  private HeapMemStoreLAB mslab;
+
+  protected void setUp() throws Exception {
+    super.setUp();
+
+    // create an array of Cells to pass to the CellFlatMap under the CellSet
+    final byte[] one = Bytes.toBytes(15);
+    final byte[] two = Bytes.toBytes(25);
+    final byte[] three = Bytes.toBytes(35);
+    final byte[] four = Bytes.toBytes(45);
+
+    final byte[] f = Bytes.toBytes("f");
+    final byte[] q = Bytes.toBytes("q");
+    final byte[] v = Bytes.toBytes(4);
+
+    final KeyValue kv1 = new KeyValue(one, f, q, 10, v);
+    final KeyValue kv2 = new KeyValue(two, f, q, 20, v);
+    final KeyValue kv3 = new KeyValue(three, f, q, 30, v);
+    final KeyValue kv4 = new KeyValue(four, f, q, 40, v);
+
+    cells = new Cell[] {kv1, kv2, kv3, kv4};
+    cbOnHeap = new CellArrayMap(CellComparator.COMPARATOR, cells, 0, NUM_OF_CELLS, false);
+
+    conf.setBoolean(SegmentFactory.USEMSLAB_KEY, true);
+    conf.setFloat(MemStoreChunkPool.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
+    MemStoreChunkPool.chunkPoolDisabled = false;
+    mslab = new HeapMemStoreLAB(conf);
+  }
+
+  /* Create and test a CellSet based on CellArrayMap */
+  public void testCellBlocksOnHeap() throws Exception {
+    CellSet cs = new CellSet(cbOnHeap);
+    testCellBlocks(cs);
+    testIterators(cs);
+  }
+
+  /* Generic basic test for immutable CellSet */
+  private void testCellBlocks(CellSet cs) throws Exception {
+    final byte[] oneAndHalf = Bytes.toBytes(20);
+    final byte[] f = Bytes.toBytes("f");
+    final byte[] q = Bytes.toBytes("q");
+    final byte[] v = Bytes.toBytes(4);
+    final KeyValue outerCell = new KeyValue(oneAndHalf, f, q, 10, v);
+
+    assertEquals(NUM_OF_CELLS, cs.size());  // check size
+    assertFalse(cs.contains(outerCell));    // check outer cell
+
+    assertTrue(cs.contains(cells[0]));      // check existence of the first
+    Cell first = cs.first();
+    assertTrue(cells[0].equals(first));
+
+    assertTrue(cs.contains(cells[NUM_OF_CELLS - 1]));  // check last
+    Cell last = cs.last();
+    assertTrue(cells[NUM_OF_CELLS - 1].equals(last));
+
+    SortedSet<Cell> tail = cs.tailSet(cells[1]);  // check tail and head sizes
+    assertEquals(NUM_OF_CELLS - 1, tail.size());
+    SortedSet<Cell> head = cs.headSet(cells[1]);
+    assertEquals(1, head.size());
+
+    SortedSet<Cell> tailOuter = cs.tailSet(outerCell);  // check tail starting from outer cell
+    assertEquals(NUM_OF_CELLS - 1, tailOuter.size());
+
+    Cell tailFirst = tail.first();
+    assertTrue(cells[1].equals(tailFirst));
+    Cell tailLast = tail.last();
+    assertTrue(cells[NUM_OF_CELLS - 1].equals(tailLast));
+
+    Cell headFirst = head.first();
+    assertTrue(cells[0].equals(headFirst));
+    Cell headLast = head.last();
+    assertTrue(cells[0].equals(headLast));
+  }
+
+  /* Generic iterators test for immutable CellSet */
+  private void testIterators(CellSet cs) throws Exception {
+
+    // Assert that we have
NUM_OF_CELLS values and that they are in order + int count = 0; + for (Cell kv: cs) { + assertEquals("\n\n-------------------------------------------------------------------\n" + + "Comparing iteration number " + (count + 1) + " the returned cell: " + kv + + ", the first Cell in the CellBlocksMap: " + cells[count] + + ", and the same transformed to String: " + cells[count].toString() + + "\n-------------------------------------------------------------------\n", + cells[count], kv); + count++; + } + assertEquals(NUM_OF_CELLS, count); + + // Test descending iterator + count = 0; + for (Iterator i = cs.descendingIterator(); i.hasNext();) { + Cell kv = i.next(); + assertEquals(cells[NUM_OF_CELLS - (count + 1)], kv); + count++; + } + assertEquals(NUM_OF_CELLS, count); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java index c5aae00..3fae87f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java @@ -58,15 +58,15 @@ import static org.junit.Assert.assertTrue; public class TestCompactingMemStore extends TestDefaultMemStore { private static final Log LOG = LogFactory.getLog(TestCompactingMemStore.class); - private static MemStoreChunkPool chunkPool; - private HRegion region; - private RegionServicesForStores regionServicesForStores; - private HStore store; + protected static MemStoreChunkPool chunkPool; + protected HRegion region; + protected RegionServicesForStores regionServicesForStores; + protected HStore store; ////////////////////////////////////////////////////////////////////////////// // Helpers ////////////////////////////////////////////////////////////////////////////// - private static byte[] makeQualifier(final int i1, final int i2) { + protected static byte[] makeQualifier(final int i1, final int i2) { return Bytes.toBytes(Integer.toString(i1) + ";" + Integer.toString(i2)); } @@ -79,6 +79,12 @@ public class TestCompactingMemStore extends TestDefaultMemStore { @Override @Before public void setUp() throws Exception { + compactingSetUp(); + this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, + store, regionServicesForStores); + } + + protected void compactingSetUp() throws Exception { super.internalSetUp(); Configuration conf = new Configuration(); conf.setBoolean(SegmentFactory.USEMSLAB_KEY, true); @@ -89,13 +95,11 @@ public class TestCompactingMemStore extends TestDefaultMemStore { this.region = hbaseUtility.createTestRegion("foobar", hcd); this.regionServicesForStores = region.getRegionServicesForStores(); this.store = new HStore(region, hcd, conf); - this.memstore = new CompactingMemStore(HBaseConfiguration.create(), CellComparator.COMPARATOR, - store, regionServicesForStores); + chunkPool = MemStoreChunkPool.getPool(conf); assertTrue(chunkPool != null); } - /** * A simple test which verifies the 3 possible states when scanning across snapshot. 
* @@ -624,6 +628,11 @@ public class TestCompactingMemStore extends TestDefaultMemStore { while (((CompactingMemStore)memstore).isMemStoreFlushingInMemory()) { Threads.sleep(1000); } + int counter = 0; + for ( Segment s : memstore.getSegments()) { + counter += s.getCellsCount(); + } + assertEquals(3, counter); assertEquals(0, memstore.getSnapshot().getCellsCount()); assertEquals(376, regionServicesForStores.getGlobalMemstoreTotalSize()); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java new file mode 100644 index 0000000..2fe31a2 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingToCellArrayMapMemStore.java @@ -0,0 +1,250 @@ +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.testclassification.RegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdge; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Threads; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import org.junit.experimental.categories.Category; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.util.ArrayList; +import java.util.List; + + + +/** + * compacted memstore test case + */ +@Category({RegionServerTests.class, MediumTests.class}) +public class TestCompactingToCellArrayMapMemStore extends TestCompactingMemStore { + + private static final Log LOG = LogFactory.getLog(TestCompactingToCellArrayMapMemStore.class); + //private static MemStoreChunkPool chunkPool; + //private HRegion region; + //private RegionServicesForStores regionServicesForStores; + //private HStore store; + + ////////////////////////////////////////////////////////////////////////////// + // Helpers + ////////////////////////////////////////////////////////////////////////////// + + @Override public void tearDown() throws Exception { + chunkPool.clearChunks(); + } + + @Override public void setUp() throws Exception { + compactingSetUp(); + Configuration conf = HBaseConfiguration.create(); + + conf.setLong("hbase.hregion.compacting.memstore.type", 2); // compact to CellArrayMap + + this.memstore = + new 
CompactingMemStore(conf, CellComparator.COMPARATOR, store,
+            regionServicesForStores);
+  }
+
+  //////////////////////////////////////////////////////////////////////////////
+  // Compaction tests
+  //////////////////////////////////////////////////////////////////////////////
+  public void testCompaction1Bucket() throws IOException {
+    int counter = 0;
+    String[] keys1 = { "A", "A", "B", "C" }; //A1, A2, B3, C4
+
+    // test 1 bucket
+    addRowsByKeys(memstore, keys1);
+
+    assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize());
+    assertEquals(4, memstore.getActive().getCellsCount());
+    long size = memstore.getFlushableSize();
+    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
+    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
+      Threads.sleep(10);
+    }
+    assertEquals(0, memstore.getSnapshot().getCellsCount());
+    assertEquals(456, regionServicesForStores.getGlobalMemstoreTotalSize());
+    for (Segment s : memstore.getSegments()) {
+      counter += s.getCellsCount();
+    }
+    assertEquals(3, counter);
+    size = memstore.getFlushableSize();
+    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
+    region.addAndGetGlobalMemstoreSize(-size);       // simulate flusher
+    ImmutableSegment s = memstore.getSnapshot();
+    assertEquals(3, s.getCellsCount());
+    assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    memstore.clearSnapshot(snapshot.getId());
+  }
+
+  public void testCompaction2Buckets() throws IOException {
+
+    String[] keys1 = { "A", "A", "B", "C" };
+    String[] keys2 = { "A", "B", "D" };
+
+    addRowsByKeys(memstore, keys1);
+    assertEquals(704, regionServicesForStores.getGlobalMemstoreTotalSize());
+    long size = memstore.getFlushableSize();
+
+//    assertTrue(
+//        "\n\n<<< This is the active size with 4 keys - " + memstore.getActive().getSize()
+//        + ". This is the memstore flushable size - " + size + "\n",false);
+
+    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
+    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
+      Threads.sleep(1000);
+    }
+    int counter = 0;
+    for (Segment s : memstore.getSegments()) {
+      counter += s.getCellsCount();
+    }
+    assertEquals(3, counter);
+    assertEquals(0, memstore.getSnapshot().getCellsCount());
+    assertEquals(456, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    addRowsByKeys(memstore, keys2);
+    assertEquals(984, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    size = memstore.getFlushableSize();
+    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
+    int i = 0;
+    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
+      Threads.sleep(10);
+      if (i++ > 10000000) {  // increment the guard so the watchdog can actually fire
+        ((CompactingMemStore) memstore).debug();
+        assertTrue("\n\n<<< Infinite loop! :( \n", false);
+      }
+    }
+    assertEquals(0, memstore.getSnapshot().getCellsCount());
+    counter = 0;
+    for (Segment s : memstore.getSegments()) {
+      counter += s.getCellsCount();
+    }
+    assertEquals(4, counter);
+    assertEquals(608, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    size = memstore.getFlushableSize();
+    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
+    region.addAndGetGlobalMemstoreSize(-size);       // simulate flusher
+    ImmutableSegment s = memstore.getSnapshot();
+    assertEquals(4, s.getCellsCount());
+    assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    memstore.clearSnapshot(snapshot.getId());
+  }
+
+  public void testCompaction3Buckets() throws IOException {
+
+    String[] keys1 = { "A", "A", "B", "C" };
+    String[] keys2 = { "A", "B", "D" };
+    String[] keys3 = { "D", "B", "B" };
+
+    addRowsByKeys(memstore, keys1);
+    assertEquals(704, region.getMemstoreSize());
+
+    long size = memstore.getFlushableSize();
+    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
+
+    String tstStr = "\n\nFlushable size after first flush in memory:" + size
+        + ". Is MemStore in compaction?:"
+        + ((CompactingMemStore) memstore).isMemStoreFlushingInMemory();
+    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
+      Threads.sleep(10);
+    }
+    assertEquals(0, memstore.getSnapshot().getCellsCount());
+    assertEquals(456, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    addRowsByKeys(memstore, keys2);
+
+    tstStr += " After adding second part of the keys. Memstore size: " +
+        region.getMemstoreSize() + ", Memstore Total Size: " +
+        regionServicesForStores.getGlobalMemstoreTotalSize() + "\n\n";
+
+    assertEquals(984, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    ((CompactingMemStore) memstore).disableCompaction();
+    size = memstore.getFlushableSize();
+    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline without compaction
+    assertEquals(0, memstore.getSnapshot().getCellsCount());
+    assertEquals(984, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    addRowsByKeys(memstore, keys3);
+    assertEquals(1512, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    ((CompactingMemStore) memstore).enableCompaction();
+    size = memstore.getFlushableSize();
+    ((CompactingMemStore) memstore).flushInMemory(); // push keys to pipeline and compact
+    while (((CompactingMemStore) memstore).isMemStoreFlushingInMemory()) {
+      Threads.sleep(10);
+    }
+    assertEquals(0, memstore.getSnapshot().getCellsCount());
+    assertEquals(608, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    size = memstore.getFlushableSize();
+    MemStoreSnapshot snapshot = memstore.snapshot(); // push keys to snapshot
+    region.addAndGetGlobalMemstoreSize(-size);       // simulate flusher
+    ImmutableSegment s = memstore.getSnapshot();
+    assertEquals(4, s.getCellsCount());
+    assertEquals(0, regionServicesForStores.getGlobalMemstoreTotalSize());
+
+    memstore.clearSnapshot(snapshot.getId());
+
+    //assertTrue(tstStr, false);
+  }
+
+  private void addRowsByKeys(final AbstractMemStore hmc, String[] keys) {
+    byte[] fam = Bytes.toBytes("testfamily");
+    byte[] qf = Bytes.toBytes("testqualifier");
+    for (int i = 0; i < keys.length; i++) {
+      long timestamp = System.currentTimeMillis();
+      Threads.sleep(1); // to make sure each kv gets a different ts
+      byte[] row = Bytes.toBytes(keys[i]);
+      byte[] val = Bytes.toBytes(keys[i] + i);
+      KeyValue kv = new KeyValue(row, fam, qf, timestamp, val);
+      hmc.add(kv);
+      LOG.debug("added 
kv: " + kv.getKeyString() + ", timestamp" + kv.getTimestamp()); + long size = AbstractMemStore.heapSizeChange(kv, true); + regionServicesForStores.addAndGetGlobalMemstoreSize(size); + } + } + + private class EnvironmentEdgeForMemstoreTest implements EnvironmentEdge { + long t = 1234; + + @Override public long currentTime() { + return t; + } + + public void setCurrentTimeMillis(long t) { + this.t = t; + } + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 9aa3a9b..c44f022 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -5109,7 +5109,7 @@ public class TestHRegion { * * @throws IOException */ - private void assertScan(final HRegion r, final byte[] fs, final byte[] firstValue) + protected void assertScan(final HRegion r, final byte[] fs, final byte[] firstValue) throws IOException { byte[][] families = { fs }; Scan scan = new Scan(); @@ -5172,7 +5172,7 @@ public class TestHRegion { } } - private Configuration initSplit() { + protected Configuration initSplit() { // Always compact if there is more than one store file. CONF.setInt("hbase.hstore.compactionThreshold", 2); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java index be604af..829315d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java @@ -19,10 +19,13 @@ package org.apache.hadoop.hbase.regionserver; import java.io.IOException; +import java.util.TreeMap; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CategoryBasedTimeout; +import org.apache.hadoop.hbase.HBaseTestCase; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Durability; @@ -30,11 +33,18 @@ import org.apache.hadoop.hbase.client.TestMobSnapshotCloneIndependence; import org.apache.hadoop.hbase.master.procedure.TestMasterFailoverWithProcedures; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests; +import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.wal.WAL; import org.junit.ClassRule; +import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.TestRule; +import static org.apache.hadoop.hbase.HBaseTestingUtility.*; +import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY; +import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3; +import static org.junit.Assert.assertNotNull; + /** * A test similar to TestHRegion, but with in-memory flush families. * Also checks wal truncation after in-memory compaction. @@ -65,5 +75,96 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion{ isReadOnly, durability, wal, inMemory, families); } + /** + * Splits twice and verifies getting from each of the split regions. 
+ * + * @throws Exception + */ + @Override + public void testBasicSplit() throws Exception { + byte[][] families = { fam1, fam2, fam3 }; + + Configuration hc = initSplit(); + // Setting up region + String method = this.getName(); + this.region = initHRegion(tableName, method, hc, families); + + try { + LOG.info("" + HBaseTestCase.addContent(region, fam3)); + region.flush(true); + region.compactStores(); + byte[] splitRow = region.checkSplit(); + assertNotNull(splitRow); + LOG.info("SplitRow: " + Bytes.toString(splitRow)); + HRegion[] regions = splitRegion(region, splitRow); + try { + // Need to open the regions. + // TODO: Add an 'open' to HRegion... don't do open by constructing + // instance. + for (int i = 0; i < regions.length; i++) { + regions[i] = HRegion.openHRegion(regions[i], null); + } + // Assert can get rows out of new regions. Should be able to get first + // row from first region and the midkey from second region. + assertGet(regions[0], fam3, Bytes.toBytes(START_KEY)); + assertGet(regions[1], fam3, splitRow); + // Test I can get scanner and that it starts at right place. + assertScan(regions[0], fam3, Bytes.toBytes(START_KEY)); + assertScan(regions[1], fam3, splitRow); + // Now prove can't split regions that have references. + for (int i = 0; i < regions.length; i++) { + // Add so much data to this region, we create a store file that is > + // than one of our unsplitable references. it will. + for (int j = 0; j < 2; j++) { + HBaseTestCase.addContent(regions[i], fam3); + } + HBaseTestCase.addContent(regions[i], fam2); + HBaseTestCase.addContent(regions[i], fam1); + regions[i].flush(true); + } + + byte[][] midkeys = new byte[regions.length][]; + // To make regions splitable force compaction. + for (int i = 0; i < regions.length; i++) { + regions[i].compactStores(); + midkeys[i] = regions[i].checkSplit(); + } + + TreeMap sortedMap = new TreeMap(); + // Split these two daughter regions so then I'll have 4 regions. Will + // split because added data above. + for (int i = 0; i < regions.length; i++) { + HRegion[] rs = null; + if (midkeys[i] != null) { + rs = splitRegion(regions[i], midkeys[i]); + for (int j = 0; j < rs.length; j++) { + sortedMap.put(Bytes.toString(rs[j].getRegionInfo().getRegionName()), + HRegion.openHRegion(rs[j], null)); + } + } + } + LOG.info("Made 4 regions"); + // The splits should have been even. Test I can get some arbitrary row + // out of each. +// int interval = (LAST_CHAR - FIRST_CHAR) / 3; +// byte[] b = Bytes.toBytes(START_KEY); +// for (HRegion r : sortedMap.values()) { +// assertGet(r, fam3, b); +// b[0] += interval; +// } + } finally { + for (int i = 0; i < regions.length; i++) { + try { + regions[i].close(); + } catch (IOException e) { + // Ignore. + } + } + } + } finally { + HBaseTestingUtility.closeRegionAndWAL(this.region); + this.region = null; + } + } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 2acfd12..21b7d2c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -201,7 +201,8 @@ public class TestWalAndCompactingMemStoreFlush { // memstores of CF1, CF2 and CF3. 
    String msg = "totalMemstoreSize="+totalMemstoreSize +
        " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD +
-        " DEEP_OVERHEAD_PER_PIPELINE_ITEM="+CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM +
+        " DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM="
+        + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM +
        " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI +
        " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI +
        " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ;
@@ -238,8 +239,8 @@ public class TestWalAndCompactingMemStoreFlush {
 
     s = s + "DefaultMemStore DEEP_OVERHEAD is:" + DefaultMemStore.DEEP_OVERHEAD
         + ", CompactingMemStore DEEP_OVERHEAD is:" + CompactingMemStore.DEEP_OVERHEAD
-        + ", CompactingMemStore DEEP_OVERHEAD_PER_PIPELINE_ITEM is:" + CompactingMemStore
-        .DEEP_OVERHEAD_PER_PIPELINE_ITEM
+        + ", CompactingMemStore DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM is:" + CompactingMemStore
+        .DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM
         + "\n----After first flush! CF1 should be flushed to memory, but not compacted.---\n"
         + "Size of CF1 is:" + cf1MemstoreSizePhaseII + ", size of CF2 is:" + cf2MemstoreSizePhaseII
         + ", size of CF3 is:" + cf3MemstoreSizePhaseII + "\n";
@@ -247,14 +248,14 @@ public class TestWalAndCompactingMemStoreFlush {
     // CF1 was flushed to memory, but there is nothing to compact, should
     // remain the same size plus renewed empty skip-list
     assertEquals(s, cf1MemstoreSizePhaseII,
-        cf1MemstoreSizePhaseI + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM);
+        cf1MemstoreSizePhaseI + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM);
 
     // CF2 should become empty
     assertEquals(DefaultMemStore.DEEP_OVERHEAD, cf2MemstoreSizePhaseII);
 
     // verify that CF3 was flushed to memory and was compacted (this is approximation check)
     assertTrue(cf3MemstoreSizePhaseI/2+DefaultMemStore.DEEP_OVERHEAD +
-        CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM >
+        CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM >
         cf3MemstoreSizePhaseII);
     assertTrue(cf3MemstoreSizePhaseI/2 < cf3MemstoreSizePhaseII);
 
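Since several assertions in this test repeat the same bookkeeping, here is the size model in one place. This is a rough illustrative sketch, not part of the patch; the concrete byte values depend on the ClassSize estimates of the running JVM:

    // A CF flushed in-memory keeps its data and pays for one more pipeline
    // skip-list segment, until that segment is compacted or flattened:
    long expectedCf1PhaseII =
        cf1MemstoreSizePhaseI + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM;
    // A CF flushed to disk collapses back to the bare memstore overhead:
    long expectedCf2PhaseII = DefaultMemStore.DEEP_OVERHEAD;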
@@ -419,7 +420,8 @@ public class TestWalAndCompactingMemStoreFlush {
     // memstores of CF1, CF2 and CF3.
     String msg = "totalMemstoreSize="+totalMemstoreSize +
         " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD +
-        " DEEP_OVERHEAD_PER_PIPELINE_ITEM="+CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_ITEM +
+        " DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM="
+        + CompactingMemStore.DEEP_OVERHEAD_PER_PIPELINE_SKIPLIST_ITEM +
         " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI +
         " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI +
         " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ;
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 5f04d1d..9c05374 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -814,7 +814,7 @@ module Hbase
       family.setScope(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::REPLICATION_SCOPE)
       family.setCacheDataOnWrite(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::CACHE_DATA_ON_WRITE)
       family.setInMemory(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY)
-      family.setCompacted(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
+      family.setInMemoryCompaction(JBoolean.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION)
       family.setTimeToLive(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::TTL)) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::TTL)
       family.setDataBlockEncoding(org.apache.hadoop.hbase.io.encoding.DataBlockEncoding.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::DATA_BLOCK_ENCODING)
       family.setBlocksize(JInteger.valueOf(arg.delete(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE))) if arg.include?(org.apache.hadoop.hbase.HColumnDescriptor::BLOCKSIZE)
-- 
1.7.10.2 (Apple Git-33)
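For reference, a minimal Java-side sketch of the same knob the shell change above wires up; the table and family names are placeholders, and setInMemoryCompaction is the HColumnDescriptor setter this patch targets:

    // Illustrative only: enabling in-memory compaction from the client API,
    // mirroring what the shell does for the IN_MEMORY_COMPACTION attribute.
    HColumnDescriptor family = new HColumnDescriptor(Bytes.toBytes("f"));
    family.setInMemory(true);            // keep the CF's data in memory
    family.setInMemoryCompaction(true);  // opt the CF into the CompactingMemStore path
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("t"));
    table.addFamily(family);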