diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 799a64a..5391c1e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -186,19 +186,19 @@ public class CompactSplitThread implements CompactionRequestor {
   public synchronized void requestCompaction(final HRegion r, final String why)
       throws IOException {
-    for(Store s : r.getStores().values()) {
+    for (HStore s : r.getStores().values()) {
       requestCompaction(r, s, why, Store.NO_PRIORITY);
     }
   }
 
-  public synchronized void requestCompaction(final HRegion r, final Store s,
+  public synchronized void requestCompaction(final HRegion r, final HStore s,
       final String why) throws IOException {
     requestCompaction(r, s, why, Store.NO_PRIORITY);
   }
 
   public synchronized void requestCompaction(final HRegion r, final String why, int p)
       throws IOException {
-    for(Store s : r.getStores().values()) {
+    for (HStore s : r.getStores().values()) {
       requestCompaction(r, s, why, p);
     }
   }
@@ -209,7 +209,7 @@ public class CompactSplitThread implements CompactionRequestor {
    * @param why Why compaction requested -- used in debug messages
    * @param priority override the default priority (NO_PRIORITY == decide)
    */
-  public synchronized void requestCompaction(final HRegion r, final Store s,
+  public synchronized void requestCompaction(final HRegion r, final HStore s,
       final String why, int priority) throws IOException {
     if (this.server.isStopped()) {
       return;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
index ad0bb98..8d957fb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionRequestor.java
@@ -37,7 +37,8 @@ public interface CompactionRequestor {
    * @param why Why compaction was requested -- used in debug messages
    * @throws IOException
    */
-  public void requestCompaction(final HRegion r, final Store s, final String why) throws IOException;
+  public void requestCompaction(final HRegion r, final HStore s, final String why)
+      throws IOException;
 
   /**
    * @param r Region to compact
@@ -54,7 +55,7 @@ public interface CompactionRequestor {
    * @param pri Priority of this compaction. minHeap. <=0 is critical
    * @throws IOException
    */
-  public void requestCompaction(final HRegion r, final Store s,
+  public void requestCompaction(final HRegion r, final HStore s,
       final String why, int pri) throws IOException;
 }
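A minimal caller-side sketch of the priority overload above, assuming a CompactionRequestor obtained from HRegionServer#getCompactionRequester() (an accessor this patch also exercises); the regionServer and region variables and the reason string are illustrative:

    // Hedged sketch: request a critical compaction (pri <= 0) for every store
    // in a region, going through the interface types introduced by this patch.
    CompactionRequestor requester = regionServer.getCompactionRequester();
    for (HStore s : region.getStores().values()) {
      requester.requestCompaction(region, s, "example: blocking store file count", 0);
    }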
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
index a35f285..db6e6b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.java
@@ -51,7 +51,7 @@ public class ConstantSizeRegionSplitPolicy extends RegionSplitPolicy {
     boolean force = region.shouldForceSplit();
     boolean foundABigStore = false;
 
-    for (Store store : region.getStores().values()) {
+    for (HStore store : region.getStores().values()) {
       // If any of the stores are unable to split (eg they contain reference files)
       // then don't split
       if ((!store.canSplit())) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0ab5bf8..adcf830 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -203,8 +203,8 @@ public class HRegion implements HeapSize { // , Writable{
   private final AtomicInteger lockIdGenerator = new AtomicInteger(1);
   static private Random rand = new Random();
 
-  protected final Map<byte[], Store> stores =
-    new ConcurrentSkipListMap<byte[], Store>(Bytes.BYTES_RAWCOMPARATOR);
+  protected final Map<byte[], HStore> stores = new ConcurrentSkipListMap<byte[], HStore>(
+      Bytes.BYTES_RAWCOMPARATOR);
 
   // Registered region protocol handlers
   private ClassToInstanceMap<CoprocessorProtocol>
@@ -642,7 +642,7 @@ public class HRegion implements HeapSize { // , Writable{
    * @return True if this region has references.
    */
   public boolean hasReferences() {
-    for (Store store : this.stores.values()) {
+    for (HStore store : this.stores.values()) {
       for (StoreFile sf : store.getStorefiles()) {
         // Found a reference, return.
         if (sf.isReference()) return true;
@@ -660,7 +660,7 @@ public class HRegion implements HeapSize { // , Writable{
     HDFSBlocksDistribution hdfsBlocksDistribution =
       new HDFSBlocksDistribution();
     synchronized (this.stores) {
-      for (Store store : this.stores.values()) {
+      for (HStore store : this.stores.values()) {
         for (StoreFile sf : store.getStorefiles()) {
           HDFSBlocksDistribution storeFileBlocksDistribution =
             sf.getHDFSBlockDistribution();
@@ -977,7 +977,7 @@ public class HRegion implements HeapSize { // , Writable{
             storeCloserThreadPool);
 
       // close each store in parallel
-      for (final Store store : stores.values()) {
+      for (final HStore store : stores.values()) {
         completionService
             .submit(new Callable<ImmutableList<StoreFile>>() {
               public ImmutableList<StoreFile> call() throws IOException {
@@ -1173,7 +1173,7 @@ public class HRegion implements HeapSize { // , Writable{
   /** @return size of largest HStore. */
   public long getLargestHStoreSize() {
     long size = 0;
-    for (Store h: stores.values()) {
+    for (HStore h : stores.values()) {
       long storeSize = h.getSize();
       if (storeSize > size) {
         size = storeSize;
@@ -1205,7 +1205,7 @@ public class HRegion implements HeapSize { // , Writable{
   }
 
   void triggerMajorCompaction() {
-    for (Store h: stores.values()) {
+    for (HStore h : stores.values()) {
       h.triggerMajorCompaction();
     }
   }
@@ -1232,7 +1232,7 @@ public class HRegion implements HeapSize { // , Writable{
    * @throws IOException e
    */
   public void compactStores() throws IOException {
-    for(Store s : getStores().values()) {
+    for (HStore s : getStores().values()) {
       CompactionRequest cr = s.requestCompaction();
       if(cr != null) {
         try {
@@ -1500,7 +1500,7 @@ public class HRegion implements HeapSize { // , Writable{
       wal.startCacheFlush(this.regionInfo.getEncodedNameAsBytes());
       completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId);
 
-      for (Store s : stores.values()) {
+      for (HStore s : stores.values()) {
         storeFlushers.add(s.getStoreFlusher(completeSequenceId));
       }
 
@@ -1658,7 +1658,7 @@ public class HRegion implements HeapSize { // , Writable{
     startRegionOperation();
     this.readRequestsCount.increment();
     try {
-      Store store = getStore(family);
+      HStore store = getStore(family);
       // get the closest key. (HStore.getRowKeyAtOrBefore can return null)
       KeyValue key = store.getRowKeyAtOrBefore(row);
       Result result = null;
@@ -2662,7 +2662,7 @@ public class HRegion implements HeapSize { // , Writable{
       byte[] family = e.getKey();
       List<KeyValue> edits = e.getValue();
 
-      Store store = getStore(family);
+      HStore store = getStore(family);
       for (KeyValue kv: edits) {
         kv.setMemstoreTS(localizedWriteEntry.getWriteNumber());
         size += store.add(kv);
@@ -2702,7 +2702,7 @@ public class HRegion implements HeapSize { // , Writable{
         // Remove those keys from the memstore that matches our
         // key's (row, cf, cq, timestamp, memstoreTS). The interesting part is
         // that even the memstoreTS has to match for keys that will be rolled back.
-        Store store = getStore(family);
+        HStore store = getStore(family);
         for (KeyValue kv: edits) {
           store.rollback(kv);
           kvsRolledback++;
@@ -2918,7 +2918,7 @@ public class HRegion implements HeapSize { // , Writable{
     long editsCount = 0;
     long intervalEdits = 0;
     HLog.Entry entry;
-    Store store = null;
+    HStore store = null;
     boolean reported_once = false;
 
     try {
@@ -3056,7 +3056,7 @@ public class HRegion implements HeapSize { // , Writable{
    * @param kv KeyValue to add.
    * @return True if we should flush.
    */
-  protected boolean restoreEdit(final Store s, final KeyValue kv) {
+  protected boolean restoreEdit(final HStore s, final KeyValue kv) {
     long kvSize = s.add(kv);
     if (this.rsAccounting != null) {
       rsAccounting.addAndGetRegionReplayEditsSize(this.regionInfo.getRegionName(), kvSize);
@@ -3091,11 +3091,11 @@ public class HRegion implements HeapSize { // , Writable{
    * @return Store that goes with the family on passed column.
    * TODO: Make this lookup faster.
    */
-  public Store getStore(final byte [] column) {
+  public HStore getStore(final byte[] column) {
     return this.stores.get(column);
   }
 
-  public Map<byte[], Store> getStores() {
+  public Map<byte[], HStore> getStores() {
     return this.stores;
   }
 
@@ -3111,7 +3111,7 @@ public class HRegion implements HeapSize { // , Writable{
     List<String> storeFileNames = new ArrayList<String>();
     synchronized(closeLock) {
       for(byte[] column : columns) {
-        Store store = this.stores.get(column);
+        HStore store = this.stores.get(column);
         if (store == null) {
           throw new IllegalArgumentException("No column family : " +
               new String(column) + " available");
@@ -3331,7 +3331,7 @@ public class HRegion implements HeapSize { // , Writable{
       byte[] familyName = p.getFirst();
       String path = p.getSecond();
 
-      Store store = getStore(familyName);
+      HStore store = getStore(familyName);
       if (store == null) {
         IOException ioe = new DoNotRetryIOException(
             "No such column family " + Bytes.toStringBinary(familyName));
@@ -3373,7 +3373,7 @@ public class HRegion implements HeapSize { // , Writable{
       for (Pair<byte[], String> p : familyPaths) {
         byte[] familyName = p.getFirst();
         String path = p.getSecond();
-        Store store = getStore(familyName);
+        HStore store = getStore(familyName);
         try {
           store.bulkLoadHFile(path);
         } catch (IOException ioe) {
@@ -3474,7 +3474,7 @@ public class HRegion implements HeapSize { // , Writable{
 
       for (Map.Entry<byte[], NavigableSet<byte[]>> entry :
           scan.getFamilyMap().entrySet()) {
-        Store store = stores.get(entry.getKey());
+        HStore store = stores.get(entry.getKey());
         KeyValueScanner scanner = store.getScanner(scan, entry.getValue());
         scanners.add(scanner);
       }
@@ -4252,7 +4252,7 @@ public class HRegion implements HeapSize { // , Writable{
    * @throws IOException
    */
   boolean isMajorCompaction() throws IOException {
-    for (Store store: this.stores.values()) {
+    for (HStore store : this.stores.values()) {
       if (store.isMajorCompaction()) {
         return true;
       }
@@ -4638,7 +4638,7 @@ public class HRegion implements HeapSize { // , Writable{
     boolean flush = false;
     WALEdit walEdits = null;
     List<KeyValue> allKVs = new ArrayList<KeyValue>(append.size());
-    Map<Store, List<KeyValue>> tempMemstore = new HashMap<Store, List<KeyValue>>();
+    Map<HStore, List<KeyValue>> tempMemstore = new HashMap<HStore, List<KeyValue>>();
     long before = EnvironmentEdgeManager.currentTimeMillis();
     long size = 0;
     long txid = 0;
@@ -4655,7 +4655,7 @@ public class HRegion implements HeapSize { // , Writable{
         for (Map.Entry<byte[], List<KeyValue>> family : append.getFamilyMap()
             .entrySet()) {
 
-          Store store = stores.get(family.getKey());
+          HStore store = stores.get(family.getKey());
           List<KeyValue> kvs = new ArrayList<KeyValue>(family.getValue().size());
 
           // Get previous values for all columns in this family
@@ -4738,8 +4738,8 @@ public class HRegion implements HeapSize { // , Writable{
       }
 
       //Actually write to Memstore now
-      for (Map.Entry<Store, List<KeyValue>> entry : tempMemstore.entrySet()) {
-        Store store = entry.getKey();
+      for (Map.Entry<HStore, List<KeyValue>> entry : tempMemstore.entrySet()) {
+        HStore store = entry.getKey();
         size += store.upsert(entry.getValue());
         allKVs.addAll(entry.getValue());
       }
@@ -4791,7 +4791,7 @@ public class HRegion implements HeapSize { // , Writable{
     boolean flush = false;
     WALEdit walEdits = null;
     List<KeyValue> allKVs = new ArrayList<KeyValue>(increment.numColumns());
-    Map<Store, List<KeyValue>> tempMemstore = new HashMap<Store, List<KeyValue>>();
+    Map<HStore, List<KeyValue>> tempMemstore = new HashMap<HStore, List<KeyValue>>();
     long before = EnvironmentEdgeManager.currentTimeMillis();
     long size = 0;
     long txid = 0;
@@ -4808,7 +4808,7 @@ public class HRegion implements HeapSize { // , Writable{
         for (Map.Entry<byte[], NavigableMap<byte[], Long>> family :
             increment.getFamilyMap().entrySet()) {
 
-          Store store = stores.get(family.getKey());
+          HStore store = stores.get(family.getKey());
           List<KeyValue> kvs = new ArrayList<KeyValue>(family.getValue().size());
 
           // Get previous values for all columns in this family
@@ -4860,8 +4860,8 @@ public class HRegion implements HeapSize { // , Writable{
       }
 
       //Actually write to Memstore now
-      for (Map.Entry<Store, List<KeyValue>> entry : tempMemstore.entrySet()) {
-        Store store = entry.getKey();
+      for (Map.Entry<HStore, List<KeyValue>> entry : tempMemstore.entrySet()) {
+        HStore store = entry.getKey();
         size += store.upsert(entry.getValue());
         allKVs.addAll(entry.getValue());
       }
@@ -4918,7 +4918,7 @@ public class HRegion implements HeapSize { // , Writable{
     Integer lid = obtainRowLock(row);
     this.updatesLock.readLock().lock();
     try {
-      Store store = stores.get(family);
+      HStore store = stores.get(family);
 
       // Get the old value:
       Get get = new Get(row);
@@ -5029,7 +5029,7 @@ public class HRegion implements HeapSize { // , Writable{
   @Override
   public long heapSize() {
     long heapSize = DEEP_OVERHEAD;
-    for(Store store : this.stores.values()) {
+    for (HStore store : this.stores.values()) {
       heapSize += store.heapSize();
     }
     // this does not take into account row locks, recent flushes, mvcc entries
@@ -5274,7 +5274,7 @@ public class HRegion implements HeapSize { // , Writable{
    */
   public int getCompactPriority() {
     int count = Integer.MAX_VALUE;
-    for(Store store : stores.values()) {
+    for (HStore store : stores.values()) {
       count = Math.min(count, store.getCompactPriority());
     }
     return count;
@@ -5286,7 +5286,7 @@ public class HRegion implements HeapSize { // , Writable{
    * @return true if any store has too many store files
    */
   public boolean needsCompaction() {
-    for(Store store : stores.values()) {
+    for (HStore store : stores.values()) {
      if(store.needsCompaction()) {
        return true;
      }
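With HRegion#getStores() now typed as Map<byte[], HStore>, per-region bookkeeping such as the heapSize() and getCompactPriority() loops above can be written purely against the interface. A hedged sketch (variable names are illustrative, not part of the patch):

    // Aggregate memstore usage and the most urgent compaction priority for one region.
    long memstoreBytes = 0;
    int mostUrgentPriority = Integer.MAX_VALUE;
    for (HStore store : region.getStores().values()) {
      memstoreBytes += store.getMemStoreSize();
      mostUrgentPriority = Math.min(mostUrgentPriority, store.getCompactPriority());
    }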
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index ce945ac..27efede 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1142,7 +1142,7 @@ public class HRegionServer implements ClientProtocol,
     long currentCompactedKVs = 0;
     synchronized (r.stores) {
       stores += r.stores.size();
-      for (Store store : r.stores.values()) {
+      for (HStore store : r.stores.values()) {
         storefiles += store.getStorefilesCount();
         storeUncompressedSizeMB +=
           (int) (store.getStoreSizeUncompressed() / 1024 / 1024);
@@ -1228,7 +1228,7 @@ public class HRegionServer implements ClientProtocol,
         for (HRegion r : this.instance.onlineRegions.values()) {
           if (r == null)
             continue;
-          for (Store s : r.getStores().values()) {
+          for (HStore s : r.getStores().values()) {
             try {
               if (s.needsCompaction()) {
                 // Queue a compaction. Will recognize if major is needed.
@@ -1369,8 +1369,8 @@ public class HRegionServer implements ClientProtocol,
         writeRequestsCount += r.writeRequestsCount.get();
         synchronized (r.stores) {
           stores += r.stores.size();
-          for (Map.Entry<byte[], Store> ee : r.stores.entrySet()) {
-            final Store store = ee.getValue();
+          for (Map.Entry<byte[], HStore> ee : r.stores.entrySet()) {
+            final HStore store = ee.getValue();
             final SchemaMetrics schemaMetrics = store.getSchemaMetrics();
 
             {
@@ -1644,7 +1644,7 @@ public class HRegionServer implements ClientProtocol,
     LOG.info("Post open deploy tasks for region=" + r.getRegionNameAsString() +
       ", daughter=" + daughter);
     // Do checks to see if we need to compact (references or too many files)
-    for (Store s : r.getStores().values()) {
+    for (HStore s : r.getStores().values()) {
       if (s.hasReferences() || s.needsCompaction()) {
         getCompactionRequester().requestCompaction(r, s, "Opening Region");
       }
@@ -2009,7 +2009,7 @@ public class HRegionServer implements ClientProtocol,
     int storefileIndexSizeMB = 0;
     synchronized (r.stores) {
       stores += r.stores.size();
-      for (Store store : r.stores.values()) {
+      for (HStore store : r.stores.values()) {
         storefiles += store.getStorefilesCount();
         storefileSizeMB += (int) (store.getStorefilesSize() / 1024 / 1024);
         storefileIndexSizeMB += (int) (store.getStorefilesIndexSize() / 1024 / 1024);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
new file mode 100644
index 0000000..11a021c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law
+ * or agreed to in writing, software distributed under the License is
+ * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the specific language
+ * governing permissions and limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.NavigableSet;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
+import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.SchemaAware;
+
+import com.google.common.collect.ImmutableList;
+
+/**
+ * Interface for objects that hold a column family in a Region. It's a memstore and a set of zero
+ * or more StoreFiles, which stretch backwards over time.
+ */
+public interface HStore extends SchemaAware, HeapSize {
+
+  // General Accessors
+  public KeyValue.KVComparator getComparator();
+
+  List<StoreFile> getStorefiles();
+
+  ImmutableList<StoreFile> close() throws IOException;
+
+  /**
+   * Return a scanner for both the memstore and the HStore files. Assumes we are not in a
+   * compaction.
+   * @throws IOException
+   */
+  public KeyValueScanner getScanner(Scan scan, final NavigableSet<byte[]> targetCols)
+      throws IOException;
+
+  /**
+   * Increments the value for the given row/family/qualifier. This function will always be seen as
+   * atomic by other readers because it only puts a single KV to memstore. Thus no read/write
+   * control is necessary.
+   * @param row
+   * @param f
+   * @param qualifier
+   * @param newValue the new value to set into memstore
+   * @return memstore size delta
+   * @throws IOException
+   */
+  public long updateColumnValue(byte[] row, byte[] f, byte[] qualifier, long newValue)
+      throws IOException;
+
+  /**
+   * Adds or replaces the specified KeyValues.
+   * <p>
+   * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in
+   * MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore.
+   * <p>
+   * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic
+   * across all of them.
+   * @param kvs
+   * @return memstore size delta
+   * @throws IOException
+   */
+  public long upsert(Iterable<KeyValue> kvs) throws IOException;
+
+  /**
+   * Adds a value to the memstore
+   * @param kv
+   * @return memstore size delta
+   */
+  public long add(KeyValue kv);
+
+  /**
+   * Removes a kv from the memstore. The KeyValue is removed only if its key & memstoreTS match
+   * the key & memstoreTS value of the kv parameter.
+   * @param kv
+   */
+  public void rollback(final KeyValue kv);
+
+  /**
+   * Find the key that matches row exactly, or the one that immediately precedes it. WARNING:
+   * Only use this method on a table where writes occur with strictly increasing timestamps. This
+   * method assumes this pattern of writes in order to make it reasonably performant. Also our
+   * search is dependent on the axiom that deletes are for cells that are in the container that
+   * follows, whether a memstore snapshot or a storefile, not for the current container: i.e. we'll
+   * see deletes before we come across cells we are to delete. Presumption is that the
+   * memstore#kvset is processed before memstore#snapshot and so on.
+   * @param row The row key of the targeted row.
+   * @return Found keyvalue or null if none found.
+   * @throws IOException
+   */
+  KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException;
+
+  // Compaction oriented methods
+
+  boolean throttleCompaction(long compactionSize);
+
+  /**
+   * getter for CompactionProgress object
+   * @return CompactionProgress object; can be null
+   */
+  public CompactionProgress getCompactionProgress();
+
+  public CompactionRequest requestCompaction() throws IOException;
+
+  public CompactionRequest requestCompaction(int priority) throws IOException;
+
+  public void finishRequest(CompactionRequest cr);
+
+  /**
+   * @return true if we should run a major compaction.
+   */
+  public boolean isMajorCompaction() throws IOException;
+
+  public void triggerMajorCompaction();
+
+  /**
+   * See if there are too many store files in this store
+   * @return true if number of store files is greater than the number defined in minFilesToCompact
+   */
+  public boolean needsCompaction();
+
+  public int getCompactPriority();
+
+  /**
+   * @return The priority that this store should have in the compaction queue
+   * @param priority
+   */
+  public int getCompactPriority(int priority);
+
+  public StoreFlusher getStoreFlusher(long cacheFlushId);
+
+  // Split oriented methods
+
+  public boolean canSplit();
+
+  /**
+   * Determines if Store should be split
+   * @return byte[] if store should be split, null otherwise.
+   */
+  public byte[] getSplitPoint();
+
+  // Bulk Load methods
+
+  /**
+   * This throws a WrongRegionException if the HFile does not fit in this region, or an
+   * InvalidHFileException if the HFile is not valid.
+   */
+  void assertBulkLoadHFileOk(Path srcPath) throws IOException;
+
+  /**
+   * This method should only be called from HRegion. It is assumed that the ranges of values in the
+   * HFile fit within the store's assigned region. (assertBulkLoadHFileOk checks this)
+   */
+  void bulkLoadHFile(String srcPathStr) throws IOException;
+
+  // General accessors into the state of the store
+  // TODO abstract some of this out into a metrics class
+
+  /**
+   * @return true if the store has any underlying reference files to older HFiles
+   */
+  public boolean hasReferences();
+
+  /**
+   * @return The size of this store's memstore, in bytes
+   */
+  long getMemStoreSize();
+
+  public HColumnDescriptor getFamily();
+
+  /**
+   * @return The maximum memstoreTS in all store files.
+   */
+  public long getMaxMemstoreTS();
+
+  /**
+   * @return the data block encoder
+   */
+  public HFileDataBlockEncoder getDataBlockEncoder();
+
+  /**
+   * @return the number of files in this store
+   */
+  public int getNumberOfStoreFiles();
+
+  /** @return aggregate size of all HStores used in the last compaction */
+  public long getLastCompactSize();
+
+  /** @return aggregate size of HStore */
+  public long getSize();
+
+  /**
+   * @return Count of store files
+   */
+  int getStorefilesCount();
+
+  /**
+   * @return The size of the store files, in bytes, uncompressed.
+   */
+  long getStoreSizeUncompressed();
+
+  /**
+   * @return The size of the store files, in bytes.
+   */
+  long getStorefilesSize();
+
+  /**
+   * @return The size of the store file indexes, in bytes.
+   */
+  long getStorefilesIndexSize();
+
+  /**
+   * Returns the total size of all index blocks in the data block indexes, including the root
+   * level, intermediate levels, and the leaf level for multi-level indexes, or just the root
+   * level for single-level indexes.
+   * @return the total size of block indexes in the store
+   */
+  long getTotalStaticIndexSize();
+
+  /**
+   * Returns the total byte size of all Bloom filter bit arrays. For compound Bloom filters even
+   * the Bloom blocks currently not loaded into the block cache are counted.
+   * @return the total size of all Bloom filters in the store
+   */
+  long getTotalStaticBloomSize();
+
+  // Test-helper methods
+
+  /**
+   * Compact the most recent N files. Used in testing.
+   */
+  public void compactRecentForTesting(int N) throws IOException;
+
+  /**
+   * Used for tests. Get the cache configuration for this Store.
+   */
+  public CacheConfig getCacheConfig();
+
+  /**
+   * @return the parent region hosting this store
+   */
+  public HRegion getHRegion();
+}
\ No newline at end of file
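The point of the new interface is that region-level code can now be compiled against HStore alone, with Store as the only implementation. A hedged sketch of interface-only caller code, mirroring the post-open compaction check in HRegionServer above (the requester and region variables are illustrative):

    // Queue a compaction for any store that carries references or too many files,
    // using nothing but the HStore interface methods defined above.
    for (HStore s : region.getStores().values()) {
      if (s.hasReferences() || s.needsCompaction()) {
        requester.requestCompaction(region, s, "post-open check");
      }
    }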
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
index e8b54f9..d65a9df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
@@ -60,7 +60,7 @@ extends ConstantSizeRegionSplitPolicy {
     // Get size to check
     long sizeToCheck = getSizeToCheck(tableRegionsCount);
 
-    for (Store store : region.getStores().values()) {
+    for (HStore store : region.getStores().values()) {
       // If any of the stores is unable to split (eg they contain reference files)
       // then don't split
       if ((!store.canSplit())) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
index 033c9f8..6c5fdf6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
@@ -522,7 +522,7 @@ public class MemStore implements HeapSize {
    * @param kvs
    * @return change in memstore size
    */
-  public long upsert(List<KeyValue> kvs) {
+  public long upsert(Iterable<KeyValue> kvs) {
     this.lock.readLock().lock();
     try {
       long size = 0;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index cb6ed3c..44f1ab4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -437,7 +437,7 @@ class MemStoreFlusher extends HasThread implements FlushRequester {
   }
 
   private boolean isTooManyStoreFiles(HRegion region) {
-    for (Store hstore: region.stores.values()) {
+    for (HStore hstore : region.stores.values()) {
       if (hstore.getStorefilesCount() > this.blockingStoreFilesNumber) {
         return true;
       }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
index 518124d..9ad2c6d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionSplitPolicy.java
@@ -73,11 +73,11 @@ public abstract class RegionSplitPolicy extends Configured {
     if (explicitSplitPoint != null) {
       return explicitSplitPoint;
     }
-    Map<byte[], Store> stores = region.getStores();
+    Map<byte[], HStore> stores = region.getStores();
 
     byte[] splitPointFromLargestStore = null;
     long largestStoreSize = 0;
-    for (Store s : stores.values()) {
+    for (HStore s : stores.values()) {
       byte[] splitPoint = s.getSplitPoint();
       long storeSize = s.getSize();
       if (splitPoint != null && largestStoreSize < storeSize) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 87a1c13..b1a206e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -106,7 +106,7 @@ import com.google.common.collect.Lists;
  * not be called directly but by an HRegion manager.
  */
 @InterfaceAudience.Private
-public class Store extends SchemaConfigured implements HeapSize {
+public class Store extends SchemaConfigured implements HStore {
   static final Log LOG = LogFactory.getLog(Store.class);
 
   protected final MemStore memstore;
@@ -363,7 +363,7 @@ public class Store extends SchemaConfigured implements HStore {
     this.dataBlockEncoder = blockEncoder;
   }
 
-  FileStatus [] getStoreFiles() throws IOException {
+  public FileStatus[] getStoreFiles() throws IOException {
     return FSUtils.listStatus(this.fs, this.homedir, null);
   }
 
@@ -443,7 +443,8 @@ public class Store extends SchemaConfigured implements HStore {
    * @param kv
    * @return memstore size delta
    */
-  protected long add(final KeyValue kv) {
+  @Override
+  public long add(final KeyValue kv) {
     lock.readLock().lock();
     try {
       return this.memstore.add(kv);
@@ -474,7 +475,8 @@ public class Store extends SchemaConfigured implements HStore {
    *
    * @param kv
    */
-  protected void rollback(final KeyValue kv) {
+  @Override
+  public void rollback(final KeyValue kv) {
     lock.readLock().lock();
     try {
       this.memstore.rollback(kv);
@@ -486,6 +488,7 @@ public class Store extends SchemaConfigured implements HStore {
   /**
    * @return All store files.
    */
+  @Override
   public List<StoreFile> getStorefiles() {
     return this.storefiles;
   }
@@ -494,7 +497,8 @@ public class Store extends SchemaConfigured implements HStore {
    * This throws a WrongRegionException if the HFile does not fit in this
    * region, or an InvalidHFileException if the HFile is not valid.
    */
-  void assertBulkLoadHFileOk(Path srcPath) throws IOException {
+  @Override
+  public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
     HFile.Reader reader  = null;
     try {
       LOG.info("Validating hfile at " + srcPath + " for inclusion in "
@@ -559,7 +563,8 @@ public class Store extends SchemaConfigured implements HStore {
    * ranges of values in the HFile fit within the stores assigned region.
    * (assertBulkLoadHFileOk checks this)
    */
-  void bulkLoadHFile(String srcPathStr) throws IOException {
+  @Override
+  public void bulkLoadHFile(String srcPathStr) throws IOException {
     Path srcPath = new Path(srcPathStr);
 
     // Copy the file if it's on another filesystem
@@ -625,7 +630,8 @@ public class Store extends SchemaConfigured implements HStore {
    *
    * @throws IOException
    */
-  ImmutableList<StoreFile> close() throws IOException {
+  @Override
+  public ImmutableList<StoreFile> close() throws IOException {
     this.lock.writeLock().lock();
     try {
       ImmutableList<StoreFile> result = storefiles;
@@ -672,8 +678,9 @@ public class Store extends SchemaConfigured implements HStore {
   }
 
   /**
-   * Snapshot this stores memstore. Call before running
-   * {@link #flushCache(long, SortedSet)} so it has some work to do.
+   * Snapshot this store's memstore. Call before running
+   * {@link #flushCache(long, SortedSet, TimeRangeTracker, AtomicLong, MonitoredTask)} so it has
+   * some work to do.
    */
   void snapshot() {
     this.memstore.snapshot();
@@ -1115,7 +1122,8 @@ public class Store extends SchemaConfigured implements HStore {
     }
   }
 
-  boolean hasReferences() {
+  @Override
+  public boolean hasReferences() {
     return hasReferences(this.storefiles);
   }
 
@@ -1157,10 +1165,11 @@ public class Store extends SchemaConfigured implements HStore {
     return this.compactor.getProgress();
   }
 
-  /*
+  /**
    * @return True if we should run a major compaction.
    */
-  boolean isMajorCompaction() throws IOException {
+  @Override
+  public boolean isMajorCompaction() throws IOException {
     for (StoreFile sf : this.storefiles) {
       if (sf.getReader() == null) {
         LOG.debug("StoreFile " + sf + " has null Reader");
@@ -1699,7 +1708,8 @@ public class Store extends SchemaConfigured implements HStore {
    * @return Found keyvalue or null if none found.
    * @throws IOException
    */
-  KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException {
+  @Override
+  public KeyValue getRowKeyAtOrBefore(final byte[] row) throws IOException {
     // If minVersions is set, we will not ignore expired KVs.
     // As we're only looking for the latest matches, that should be OK.
     // With minVersions > 0 we guarantee that any KV that has any version
@@ -1978,21 +1988,24 @@ public class Store extends SchemaConfigured implements HStore {
   /**
    * @return Count of store files
    */
-  int getStorefilesCount() {
+  @Override
+  public int getStorefilesCount() {
     return this.storefiles.size();
   }
 
   /**
    * @return The size of the store files, in bytes, uncompressed.
    */
-  long getStoreSizeUncompressed() {
+  @Override
+  public long getStoreSizeUncompressed() {
     return this.totalUncompressedBytes;
   }
 
   /**
    * @return The size of the store files, in bytes.
    */
-  long getStorefilesSize() {
+  @Override
+  public long getStorefilesSize() {
     long size = 0;
     for (StoreFile s: storefiles) {
       StoreFile.Reader r = s.getReader();
@@ -2008,7 +2021,8 @@ public class Store extends SchemaConfigured implements HStore {
   /**
    * @return The size of the store file indexes, in bytes.
    */
-  long getStorefilesIndexSize() {
+  @Override
+  public long getStorefilesIndexSize() {
     long size = 0;
     for (StoreFile s: storefiles) {
       StoreFile.Reader r = s.getReader();
@@ -2028,7 +2042,8 @@ public class Store extends SchemaConfigured implements HStore {
    *
    * @return the total size of block indexes in the store
    */
-  long getTotalStaticIndexSize() {
+  @Override
+  public long getTotalStaticIndexSize() {
     long size = 0;
     for (StoreFile s : storefiles) {
       size += s.getReader().getUncompressedDataIndexSize();
@@ -2043,7 +2058,8 @@ public class Store extends SchemaConfigured implements HStore {
    *
    * @return the total size of all Bloom filters in the store
    */
-  long getTotalStaticBloomSize() {
+  @Override
+  public long getTotalStaticBloomSize() {
     long size = 0;
     for (StoreFile s : storefiles) {
       StoreFile.Reader r = s.getReader();
@@ -2055,7 +2071,8 @@ public class Store extends SchemaConfigured implements HStore {
   /**
    * @return The size of this store's memstore, in bytes
    */
-  long getMemStoreSize() {
+  @Override
+  public long getMemStoreSize() {
     return this.memstore.heapSize();
   }
 
@@ -2076,7 +2093,8 @@ public class Store extends SchemaConfigured implements HStore {
     }
   }
 
-  boolean throttleCompaction(long compactionSize) {
+  @Override
+  public boolean throttleCompaction(long compactionSize) {
     // see HBASE-5867 for discussion on the default
     long throttlePoint = conf.getLong(
         "hbase.regionserver.thread.compaction.throttle",
@@ -2084,6 +2102,7 @@ public class Store extends SchemaConfigured implements HStore {
     return compactionSize > throttlePoint;
   }
 
+  @Override
   public HRegion getHRegion() {
     return this.region;
   }
@@ -2138,8 +2157,8 @@ public class Store extends SchemaConfigured implements HStore {
    * @return memstore size delta
    * @throws IOException
    */
-  public long upsert(List<KeyValue> kvs)
-      throws IOException {
+  @Override
+  public long upsert(Iterable<KeyValue> kvs) throws IOException {
     this.lock.readLock().lock();
     try {
       // TODO: Make this operation atomic w/ MVCC
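Note the companion change to upsert(): both MemStore and Store now accept Iterable<KeyValue> instead of List<KeyValue>, so callers such as HRegion#append() and HRegion#increment() can hand over whatever collection they accumulated without copying. A hedged illustration (the kvsByStore map is hypothetical, standing in for HRegion's tempMemstore):

    // Any Iterable<KeyValue> satisfies the widened signature.
    for (Map.Entry<HStore, List<KeyValue>> e : kvsByStore.entrySet()) {
      size += e.getKey().upsert(e.getValue()); // a List is passed as an Iterable
    }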
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
index 12abb61..1f1545b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/example/TestZooKeeperTableArchiveClient.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner;
 import org.apache.hadoop.hbase.regionserver.CheckedArchivingHFileCleaner;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -177,7 +178,7 @@ public class TestZooKeeperTableArchiveClient {
     loadAndCompact(region);
 
     // check that we actually have some store files that were archived
-    Store store = region.getStore(TEST_FAM);
+    HStore store = region.getStore(TEST_FAM);
     Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(),
       region, store);
 
@@ -337,7 +338,7 @@ public class TestZooKeeperTableArchiveClient {
     loadAndCompact(region);
 
     // check that we actually have some store files that were archived
-    Store store = region.getStore(TEST_FAM);
+    HStore store = region.getStore(TEST_FAM);
     Path storeArchiveDir = HFileArchiveTestingUtil.getStoreArchivePath(UTIL.getConfiguration(),
       region, store);
 
@@ -375,7 +376,7 @@ public class TestZooKeeperTableArchiveClient {
    * Compact all the store files in a given region.
    */
   private void compactRegion(HRegion region, byte[] family) throws IOException {
-    Store store = region.getStores().get(TEST_FAM);
+    HStore store = region.getStores().get(TEST_FAM);
     store.compactRecentForTesting(store.getStorefiles().size());
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index 3d1ba3e..9a6a377 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -4593,7 +4594,7 @@ public class TestFromClientSide {
     String regionName = table.getRegionLocations().firstKey().getEncodedName();
     HRegion region = TEST_UTIL.getRSForFirstRegionInTable(
       tableName).getFromOnlineRegions(regionName);
-    Store store = region.getStores().values().iterator().next();
+    HStore store = region.getStores().values().iterator().next();
     CacheConfig cacheConf = store.getCacheConfig();
     cacheConf.setCacheDataOnWrite(true);
     cacheConf.setEvictOnClose(true);
@@ -4668,7 +4669,7 @@ public class TestFromClientSide {
     assertEquals(++expectedBlockMiss, cache.getStats().getMissCount());
   }
 
-  private void waitForStoreFileCount(Store store, int count, int timeout)
+  private void waitForStoreFileCount(HStore store, int count, int timeout)
   throws InterruptedException {
     long start = System.currentTimeMillis();
     while (start + timeout > System.currentTimeMillis() &&
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index be4bc85..3860ed1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -123,7 +123,7 @@ public class CompactionTool implements Tool {
     // list of files instead of have Store search its home dir.
     return new Store(tmpdir, region, hcd, fs, getConf()) {
       @Override
-      FileStatus[] getStoreFiles() throws IOException {
+      public FileStatus[] getStoreFiles() throws IOException {
         return this.fs.listStatus(getHomedir());
       }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
index 6cbb2bc..b0453f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestAtomicOperation.java
@@ -127,7 +127,7 @@ public class TestAtomicOperation extends HBaseTestCase {
 
     assertEquals(value+amount, result);
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     // ICV removes any extra values floating around in there.
     assertEquals(1, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
index ec53a28..d455b75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java
@@ -170,10 +170,10 @@ public class TestCompaction extends HBaseTestCase {
       throws Exception {
     Map<Store, HFileDataBlockEncoder> replaceBlockCache =
         new HashMap<Store, HFileDataBlockEncoder>();
-    for (Entry<byte[], Store> pair : r.getStores().entrySet()) {
-      Store store = pair.getValue();
+    for (Entry<byte[], HStore> pair : r.getStores().entrySet()) {
+      Store store = (Store) pair.getValue();
       HFileDataBlockEncoder blockEncoder = store.getDataBlockEncoder();
-      replaceBlockCache.put(pair.getValue(), blockEncoder);
+      replaceBlockCache.put(store, blockEncoder);
       final DataBlockEncoding inCache = DataBlockEncoding.PREFIX;
       final DataBlockEncoding onDisk = inCacheOnly ? DataBlockEncoding.NONE : inCache;
@@ -206,7 +206,7 @@ public class TestCompaction extends HBaseTestCase {
     assertEquals(compactionThreshold, result.size());
 
     // see if CompactionProgress is in place but null
-    for (Store store: this.r.stores.values()) {
+    for (HStore store : this.r.stores.values()) {
       assertNull(store.getCompactionProgress());
     }
 
@@ -215,7 +215,7 @@ public class TestCompaction extends HBaseTestCase {
 
     // see if CompactionProgress has done its thing on at least one store
     int storeCount = 0;
-    for (Store store: this.r.stores.values()) {
+    for (HStore store : this.r.stores.values()) {
       CompactionProgress progress = store.getCompactionProgress();
       if( progress != null ) {
         ++storeCount;
@@ -281,7 +281,8 @@ public class TestCompaction extends HBaseTestCase {
     // Multiple versions allowed for an entry, so the delete isn't enough
     // Lower TTL and expire to ensure that all our entries have been wiped
     final int ttl = 1000;
-    for (Store store: this.r.stores.values()) {
+    for (HStore hstore : this.r.stores.values()) {
+      Store store = ((Store) hstore);
       Store.ScanInfo old = store.scanInfo;
       Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
           old.getMinVersions(), old.getMaxVersions(), ttl,
@@ -302,7 +303,7 @@ public class TestCompaction extends HBaseTestCase {
     conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, delay);
     conf.setFloat("hbase.hregion.majorcompaction.jitter", jitterPct);
 
-    Store s = r.getStore(COLUMN_FAMILY);
+    Store s = ((Store) r.getStore(COLUMN_FAMILY));
     try {
       createStoreFile(r);
       createStoreFile(r);
@@ -435,7 +436,7 @@ public class TestCompaction extends HBaseTestCase {
     assertEquals(compactionThreshold, result.size());
 
     // do a compaction
-    Store store2 = this.r.stores.get(fam2);
+    HStore store2 = this.r.stores.get(fam2);
     int numFiles1 = store2.getStorefiles().size();
     assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3
     store2.compactRecentForTesting(compactionThreshold);   // = 3
@@ -512,7 +513,7 @@ public class TestCompaction extends HBaseTestCase {
     spyR.compactStores();
 
     // ensure that the compaction stopped, all old files are intact,
-    Store s = r.stores.get(COLUMN_FAMILY);
+    HStore s = r.stores.get(COLUMN_FAMILY);
     assertEquals(compactionThreshold, s.getStorefilesCount());
     assertTrue(s.getStorefilesSize() > 15*1000);
     // and no new store files persisted past compactStores()
@@ -536,7 +537,8 @@ public class TestCompaction extends HBaseTestCase {
     // Multiple versions allowed for an entry, so the delete isn't enough
     // Lower TTL and expire to ensure that all our entries have been wiped
     final int ttl = 1000;
-    for (Store store: this.r.stores.values()) {
+    for (HStore hstore: this.r.stores.values()) {
+      Store store = (Store)hstore;
       Store.ScanInfo old = store.scanInfo;
       Store.ScanInfo si = new Store.ScanInfo(old.getFamily(),
           old.getMinVersions(), old.getMaxVersions(), ttl,
@@ -583,7 +585,7 @@ public class TestCompaction extends HBaseTestCase {
     for (int i = 0; i < nfiles; i++) {
       createStoreFile(r);
     }
-    Store store = r.getStore(COLUMN_FAMILY);
+    Store store = (Store) r.getStore(COLUMN_FAMILY);
 
     List<StoreFile> storeFiles = store.getStorefiles();
     long maxId = StoreFile.getMaxSequenceIdInList(storeFiles);
@@ -621,7 +623,7 @@ public class TestCompaction extends HBaseTestCase {
    * Test for HBASE-5920 - Test user requested major compactions always occurring
    */
  public void testNonUserMajorCompactionRequest() throws Exception {
-    Store store = r.getStore(COLUMN_FAMILY);
+    HStore store = r.getStore(COLUMN_FAMILY);
     createStoreFile(r);
     for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
       createStoreFile(r);
@@ -640,7 +642,7 @@ public class TestCompaction extends HBaseTestCase {
    * Test for HBASE-5920
    */
   public void testUserMajorCompactionRequest() throws IOException{
-    Store store = r.getStore(COLUMN_FAMILY);
+    HStore store = r.getStore(COLUMN_FAMILY);
     createStoreFile(r);
     for (int i = 0; i < MAX_FILES_TO_COMPACT + 1; i++) {
       createStoreFile(r);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index f881da9..85f8e4f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -236,7 +236,7 @@ public class TestHRegion extends HBaseTestCase {
     MonitoredTask status = TaskMonitor.get().createStatus(method);
     Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
         Bytes.BYTES_COMPARATOR);
-    for (Store store : region.getStores().values()) {
+    for (HStore store : region.getStores().values()) {
       maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
           minSeqId - 1);
     }
@@ -288,7 +288,7 @@ public class TestHRegion extends HBaseTestCase {
     MonitoredTask status = TaskMonitor.get().createStatus(method);
     Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
         Bytes.BYTES_COMPARATOR);
-    for (Store store : region.getStores().values()) {
+    for (HStore store : region.getStores().values()) {
       maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
           recoverSeqId - 1);
     }
@@ -336,7 +336,7 @@ public class TestHRegion extends HBaseTestCase {
     Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(
         Bytes.BYTES_COMPARATOR);
-    for (Store store : region.getStores().values()) {
+    for (HStore store : region.getStores().values()) {
       maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(), minSeqId);
     }
     long seqId = region.replayRecoveredEditsIfAny(regiondir,
@@ -864,7 +864,7 @@ public class TestHRegion extends HBaseTestCase {
     put.add(kv);
 
     //checkAndPut with wrong value
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     store.memstore.kvset.size();
 
     boolean res = region.checkAndMutate(row1, fam1, qf1, CompareOp.EQUAL,
@@ -1379,10 +1379,10 @@ public class TestHRegion extends HBaseTestCase {
     // extract the key values out the memstore:
     // This is kinda hacky, but better than nothing...
     long now = System.currentTimeMillis();
-    KeyValue firstKv = region.getStore(fam1).memstore.kvset.first();
+    KeyValue firstKv = ((Store) region.getStore(fam1)).memstore.kvset.first();
     assertTrue(firstKv.getTimestamp() <= now);
     now = firstKv.getTimestamp();
-    for (KeyValue kv: region.getStore(fam1).memstore.kvset) {
+    for (KeyValue kv : ((Store) region.getStore(fam1)).memstore.kvset) {
       assertTrue(kv.getTimestamp() <= now);
       now = kv.getTimestamp();
     }
@@ -2320,7 +2320,7 @@ public class TestHRegion extends HBaseTestCase {
 
     assertEquals(value+amount, result);
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     // ICV removes any extra values floating around in there.
     assertEquals(1, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
@@ -2346,7 +2346,7 @@ public class TestHRegion extends HBaseTestCase {
     region.put(put);
 
     // get the store in question:
-    Store s = region.getStore(fam1);
+    Store s = (Store) region.getStore(fam1);
     s.snapshot(); //bam
 
     // now increment:
@@ -2490,7 +2490,7 @@ public class TestHRegion extends HBaseTestCase {
     // flush to disk.
     region.flushcache();
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     assertEquals(0, store.memstore.kvset.size());
 
     long r = region.incrementColumnValue(row, fam1, qual1, amount, true);
@@ -2516,7 +2516,7 @@ public class TestHRegion extends HBaseTestCase {
     region.put(put);
     region.flushcache();
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     assertEquals(0, store.memstore.kvset.size());
 
     long r = region.incrementColumnValue(row, fam1, qual3, amount, true);
@@ -2562,7 +2562,7 @@ public class TestHRegion extends HBaseTestCase {
 
     assertEquals(value+amount, result);
 
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     // ICV should update the existing Put with the same timestamp
     assertEquals(1, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
@@ -2578,7 +2578,7 @@ public class TestHRegion extends HBaseTestCase {
 
     assertEquals(value+amount, result);
 
-    store = region.getStore(fam1);
+    store = (Store) region.getStore(fam1);
     // ICV should update the existing Put with the same timestamp
     assertEquals(2, store.memstore.kvset.size());
     assertTrue(store.memstore.snapshot.isEmpty());
@@ -3397,7 +3397,7 @@ public class TestHRegion extends HBaseTestCase {
       region.flushcache();
     }
     //before compaction
-    Store store = region.getStore(fam1);
+    Store store = (Store) region.getStore(fam1);
     List<StoreFile> storeFiles = store.getStorefiles();
     for (StoreFile storefile : storeFiles) {
       StoreFile.Reader reader = storefile.getReader();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index a383a48..d845231 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -240,7 +240,7 @@ public class TestRegionServerMetrics {
     rs.doMetrics();
     for (HRegion r : TEST_UTIL.getMiniHBaseCluster().getRegions(
         Bytes.toBytes(TABLE_NAME))) {
-      for (Map.Entry<byte[], Store> storeEntry : r.getStores().entrySet()) {
+      for (Map.Entry<byte[], HStore> storeEntry : r.getStores().entrySet()) {
         LOG.info("For region " + r.getRegionNameAsString() + ", CF " +
             Bytes.toStringBinary(storeEntry.getKey()) + " found store files " +
             ": " + storeEntry.getValue().getStorefiles());
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
index b7ab976..bc68a86 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -181,7 +182,7 @@ public class TestWALReplay {
     // flush region and make major compaction
     destServer.getOnlineRegion(destRegion.getRegionName()).flushcache();
     // wait to complete major compaction
-    for (Store store : destServer.getOnlineRegion(destRegion.getRegionName())
+    for (HStore store : destServer.getOnlineRegion(destRegion.getRegionName())
         .getStores().values()) {
       store.triggerMajorCompaction();
     }
@@ -421,7 +422,7 @@ public class TestWALReplay {
     final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
     HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
       @Override
-      protected boolean restoreEdit(Store s, KeyValue kv) {
+      protected boolean restoreEdit(HStore s, KeyValue kv) {
         boolean b = super.restoreEdit(s, kv);
         countOfRestoredEdits.incrementAndGet();
         return b;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
index 7609e0e..654e558 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.Store;
 
 /**
@@ -221,7 +222,7 @@ public class HFileArchiveTestingUtil {
    * @param store store that is archiving files
    * @return {@link Path} to the store archive directory for the given region
    */
-  public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store) {
+  public static Path getStoreArchivePath(Configuration conf, HRegion region, HStore store) {
     return HFileArchiveUtil.getStoreArchivePath(conf, region, store.getFamily().getName());
   }
 
@@ -233,7 +234,7 @@ public class HFileArchiveTestingUtil {
     HRegion region = servingRegions.get(0);
 
     // check that we actually have some store files that were archived
-    Store store = region.getStore(storeName);
+    HStore store = region.getStore(storeName);
     return HFileArchiveTestingUtil.getStoreArchivePath(util.getConfiguration(), region, store);
   }
 }
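A closing observation on the test changes: the HStore interface deliberately omits implementation details such as memstore.kvset, memstore.snapshot, and scanInfo, so tests that inspect them downcast to the concrete class while production code stays on the interface. The pattern, sketched with illustrative assertion values:

    // Tests may downcast the interface to reach package-private internals.
    HStore hstore = region.getStore(fam1);
    Store store = (Store) hstore; // test-only escape hatch
    assertEquals(1, store.memstore.kvset.size());
    assertTrue(store.memstore.snapshot.isEmpty());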