diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
index d75e448..8bb8c1e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreCompactor.java
@@ -94,8 +94,8 @@ public class DefaultMobStoreCompactor extends DefaultCompactor {
         org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
         boolean shouldDropBehind) throws IOException {
       // make this writer with tags always because of possible new cells with tags.
-      return store.createWriterInTmp(fd.maxKeyCount, compactionCompression, true, true, true,
-        shouldDropBehind);
+      return ((HStore) store).createWriterInTmp(fd.maxKeyCount, compactionCompression, true,
+        true, true, shouldDropBehind);
     }
   };

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
index eb2564d..209d643 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/DefaultMobStoreFlusher.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.DefaultStoreFlusher;
 import org.apache.hadoop.hbase.regionserver.HMobStore;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
@@ -115,8 +116,9 @@ public class DefaultMobStoreFlusher extends DefaultStoreFlusher {
     synchronized (flushLock) {
       status.setStatus("Flushing " + store + ": creating writer");
       // Write the map out to the disk
-      writer = store.createWriterInTmp(cellsCount, store.getFamily().getCompression(),
-        false, true, true, false/*default for dropbehind*/, snapshot.getTimeRangeTracker());
+      writer = ((HStore) store).createWriterInTmp(cellsCount,
+        store.getFamily().getCompressionType(), false, true, true,
+        false/* default for dropbehind */, snapshot.getTimeRangeTracker());
       try {
         // It's a mob store, flush the cells in a mob way. This is the difference of flushing
         // between a normal and a mob store.
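[Reviewer note, not part of the patch: every call-site change in this patch follows one pattern, sketched below. The helper name and flag values are hypothetical; the Store/HStore types and the six-argument createWriterInTmp overload are the ones visible in this diff. With createWriterInTmp dropped from the Store interface, internal callers that hold a Store reference down-cast to HStore.]

    import java.io.IOException;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.HStore;
    import org.apache.hadoop.hbase.regionserver.Store;
    import org.apache.hadoop.hbase.regionserver.StoreFileWriter;

    final class WriterInTmpSketch {
      // Hypothetical helper: Store is the coprocessor-facing view; after this patch the
      // writer factory lives only on the server-internal HStore, hence the cast.
      static StoreFileWriter newTmpWriter(Store store, long maxKeyCount) throws IOException {
        return ((HStore) store).createWriterInTmp(maxKeyCount, Compression.Algorithm.NONE,
            /* isCompaction = */ false, /* includeMVCCReadpoint = */ true,
            /* includesTag = */ true, /* shouldDropBehind = */ false);
      }
    }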
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
index 079501e..380671c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFlusher.java
@@ -64,7 +64,8 @@ public class DefaultStoreFlusher extends StoreFlusher {
     synchronized (flushLock) {
       status.setStatus("Flushing " + store + ": creating writer");
       // Write the map out to the disk
-      writer = store.createWriterInTmp(cellsCount, store.getFamily().getCompressionType(),
+      writer = ((HStore) store).createWriterInTmp(cellsCount,
+          store.getFamily().getCompressionType(),
           /* isCompaction = */ false,
           /* includeMVCCReadpoint = */ true,
           /* includesTags = */ snapshot.isTagsPresent(),
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
index bb57869..048e5e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllLargeStoresPolicy.java
@@ -51,10 +51,10 @@ public class FlushAllLargeStoresPolicy extends FlushLargeStoresPolicy{
   public Collection<Store> selectStoresToFlush() {
     // no need to select stores if only one family
     if (region.getTableDesc().getFamilies().size() == 1) {
-      return region.stores.values();
+      return region.getStores();
     }
     // start selection
-    Collection<Store> stores = region.stores.values();
+    Collection<Store> stores = region.getStores();
     Set<Store> specificStoresToFlush = new HashSet<Store>();
     for (Store store : stores) {
       if (shouldFlush(store)) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllStoresPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllStoresPolicy.java
index 0058104..8cffc33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllStoresPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushAllStoresPolicy.java
@@ -29,7 +29,6 @@ public class FlushAllStoresPolicy extends FlushPolicy {

   @Override
   public Collection<Store> selectStoresToFlush() {
-    return region.stores.values();
+    return region.getStores();
   }
-
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
index ea4a193..79a5e37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FlushNonSloppyStoresFirstPolicy.java
@@ -52,15 +52,15 @@ public class FlushNonSloppyStoresFirstPolicy extends FlushLargeStoresPolicy {
       }
     }
     if(!specificStoresToFlush.isEmpty()) return specificStoresToFlush;
-    return region.stores.values();
+    return region.getStores();
   }

   @Override
   protected void configureForRegion(HRegion region) {
     super.configureForRegion(region);
     this.flushSizeLowerBound = getFlushSizeLowerBound(region);
-    for(Store store : region.stores.values()) {
-      if(store.getMemStore().isSloppy()) {
+    for (Store store : region.stores.values()) {
+      if (store.isSloppyMemstore()) {
         sloppyStores.add(store);
       } else {
         regularStores.add(store);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index 8634e37..bfa1f80 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -514,10 +514,6 @@ public class HMobStore extends HStore {
   @Override
   public void finalizeFlush() {
   }
-  @Override public MemStore getMemStore() {
-    return null;
-  }
-
   public void updateCellsCountCompactedToMob(long count) {
     cellsCountCompactedToMob += count;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d1684a3..f00cb99 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -926,8 +926,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           Future<HStore> future = completionService.take();
           HStore store = future.get();
           this.stores.put(store.getFamily().getName(), store);
-          MemStore memStore = store.getMemStore();
-          if(memStore != null && memStore.isSloppy()) {
+          if (store.isSloppyMemstore()) {
             hasSloppyStores = true;
           }

@@ -959,7 +958,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       LOG.error("Could not initialize all stores for the region=" + this);
       for (Store store : this.stores.values()) {
         try {
-          store.close();
+          ((HStore) store).close();
         } catch (IOException e) {
           LOG.warn(e.getMessage());
         }
@@ -1534,7 +1533,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           @Override
           public Pair<byte[], Collection<StoreFile>> call() throws IOException {
             return new Pair<byte[], Collection<StoreFile>>(
-              store.getFamily().getName(), store.close());
+              store.getFamily().getName(), ((HStore) store).close());
           }
         });
       }
@@ -2327,7 +2326,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi

     for (Store s : storesToFlush) {
       totalFlushableSizeOfFlushableStores += s.getFlushableSize();
-      storeFlushCtxs.put(s.getFamily().getName(), s.createFlushContext(flushOpSeqId));
+      storeFlushCtxs.put(s.getFamily().getName(), ((HStore) s).createFlushContext(flushOpSeqId));
       committedFiles.put(s.getFamily().getName(), null); // for writing stores to WAL
       storeFlushableSize.put(s.getFamily().getName(), s.getFlushableSize());
     }
@@ -2547,8 +2546,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
     // If we get to here, the HStores have been written.
-    for(Store storeToFlush :storesToFlush) {
-      storeToFlush.finalizeFlush();
+    for (Store storeToFlush : storesToFlush) {
+      ((HStore) storeToFlush).finalizeFlush();
     }
     if (wal != null) {
       wal.completeCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
@@ -3824,9 +3823,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     // Any change in how we update Store/MemStore needs to also be done in other applyToMemstore!!!!
     boolean upsert = delta && store.getFamily().getMaxVersions() == 1;
     if (upsert) {
-      return store.upsert(cells, getSmallestReadPoint());
+      return ((HStore) store).upsert(cells, getSmallestReadPoint());
     } else {
-      return store.add(cells);
+      return ((HStore) store).add(cells);
     }
   }
@@ -3841,7 +3840,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       checkFamily(CellUtil.cloneFamily(cell));
       // Unreachable because checkFamily will throw exception
     }
-    return store.add(cell);
+    return ((HStore) store).add(cell);
   }

   @Override
@@ -4082,7 +4081,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     long editsCount = 0;
     long intervalEdits = 0;
     WAL.Entry entry;
-    Store store = null;
+    HStore store = null;
     boolean reported_once = false;
     ServerNonceManager ng = this.rsServices == null ? null : this.rsServices.getNonceManager();

@@ -4178,7 +4177,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         }
         // Figure which store the edit is meant for.
         if (store == null || !CellUtil.matchingFamily(cell, store.getFamily().getName())) {
-          store = getStore(cell);
+          store = getHStore(cell);
         }
         if (store == null) {
           // This should never happen. Perhaps schema was changed between
@@ -4305,7 +4304,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi

     startRegionOperation(Operation.REPLAY_EVENT);
     try {
-      Store store = this.getStore(compaction.getFamilyName().toByteArray());
+      HStore store = this.getHStore(compaction.getFamilyName().toByteArray());
       if (store == null) {
         LOG.warn(getRegionInfo().getEncodedName() + " : "
             + "Found Compaction WAL edit for deleted family:"
@@ -4615,7 +4614,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       throws IOException {
     for (StoreFlushDescriptor storeFlush : flush.getStoreFlushesList()) {
       byte[] family = storeFlush.getFamilyName().toByteArray();
-      Store store = getStore(family);
+      HStore store = getHStore(family);
       if (store == null) {
         LOG.warn(getRegionInfo().getEncodedName() + " : "
             + "Received a flush commit marker from primary, but the family is not found."
@@ -4685,7 +4684,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private long doDropStoreMemstoreContentsForSeqId(Store s, long currentSeqId) throws IOException {
     long snapshotSize = s.getFlushableSize();
     this.addAndGetGlobalMemstoreSize(-snapshotSize);
-    StoreFlushContext ctx = s.createFlushContext(currentSeqId);
+    StoreFlushContext ctx = ((HStore) s).createFlushContext(currentSeqId);
     ctx.prepare();
     ctx.abort();
     return snapshotSize;
@@ -4783,7 +4782,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           long storeSeqId = store.getMaxSequenceId();
           List<String> storeFiles = storeDescriptor.getStoreFileList();
           try {
-            store.refreshStoreFiles(storeFiles); // replace the files with the new ones
+            ((HStore) store).refreshStoreFiles(storeFiles); // replace the files with the new ones
           } catch (FileNotFoundException ex) {
             LOG.warn(getRegionInfo().getEncodedName() + " : "
                     + "At least one of the store files: " + storeFiles
@@ -4888,7 +4887,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       for (StoreDescriptor storeDescriptor : bulkLoadEvent.getStoresList()) {
         // stores of primary may be different now
         family = storeDescriptor.getFamilyName().toByteArray();
-        Store store = getStore(family);
+        HStore store = getHStore(family);
         if (store == null) {
           LOG.warn(getRegionInfo().getEncodedName() + " : "
                   + "Received a bulk load marker from primary, but the family is not found. "
@@ -4979,7 +4978,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           long maxSeqIdBefore = store.getMaxSequenceId();

           // refresh the store files. This is similar to observing a region open wal marker.
-          store.refreshStoreFiles();
+          ((HStore) store).refreshStoreFiles();

           long storeSeqId = store.getMaxSequenceId();
           if (storeSeqId < smallestSeqIdInStores) {
@@ -5090,12 +5089,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * @param cell Cell to add.
    * @return True if we should flush.
    */
-  protected boolean restoreEdit(final Store s, final Cell cell) {
-    long kvSize = s.add(cell);
+  protected boolean restoreEdit(final HStore s, final Cell cell) {
+    long cellSize = s.add(cell);
     if (this.rsAccounting != null) {
-      rsAccounting.addAndGetRegionReplayEditsSize(getRegionInfo().getRegionName(), kvSize);
+      rsAccounting.addAndGetRegionReplayEditsSize(getRegionInfo().getRegionName(), cellSize);
     }
-    return isFlushSize(this.addAndGetGlobalMemstoreSize(kvSize));
+    return isFlushSize(this.addAndGetGlobalMemstoreSize(cellSize));
   }

   /*
@@ -5131,19 +5130,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return this.stores.get(column);
   }

+  private HStore getHStore(final byte[] column) {
+    return (HStore) getStore(column);
+  }
+
   /**
    * Return HStore instance. Does not do any copy: as the number of store is limited, we
    * iterate on the list.
    */
-  private Store getStore(Cell cell) {
+  private HStore getHStore(Cell cell) {
     for (Map.Entry<byte[], Store> famStore : stores.entrySet()) {
       if (Bytes.equals(
           cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
           famStore.getKey(), 0, famStore.getKey().length)) {
-        return famStore.getValue();
+        return (HStore) famStore.getValue();
       }
     }
-
     return null;
   }
@@ -5445,7 +5447,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       byte[] familyName = p.getFirst();
       String path = p.getSecond();

-      Store store = getStore(familyName);
+      HStore store = getHStore(familyName);
       if (store == null) {
         IOException ioe = new org.apache.hadoop.hbase.DoNotRetryIOException(
             "No such column family " + Bytes.toStringBinary(familyName));
@@ -5503,7 +5505,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     for (Pair<byte[], String> p : familyPaths) {
       byte[] familyName = p.getFirst();
       String path = p.getSecond();
-      Store store = getStore(familyName);
+      HStore store = getHStore(familyName);
       try {
         String finalPath = path;
         if (bulkLoadListener != null) {
@@ -7048,8 +7050,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
             // If no WAL, need to stamp it here.
            CellUtil.setSequenceId(cell, sequenceId);
           }
-          Store store = getStore(cell);
-          addedSize += applyToMemstore(store, cell);
+          addedSize += applyToMemstore(getHStore(cell), cell);
         }
       }
       // STEP 8. Complete mvcc.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index e9c05c7..e68815f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -28,13 +28,11 @@ import com.google.common.collect.Sets;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.NavigableSet;
 import java.util.Set;
@@ -62,9 +60,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.Tag;
-import org.apache.hadoop.hbase.TagType;
-import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
@@ -542,13 +537,17 @@ public class HStore implements Store {
    * the primary region files.
    * @throws IOException
    */
-  @Override
   public void refreshStoreFiles() throws IOException {
     Collection<StoreFileInfo> newFiles = fs.getStoreFiles(getColumnFamilyName());
     refreshStoreFilesInternal(newFiles);
   }

-  @Override
+  /**
+   * Replaces the store files that the store has with the given files. Mainly used by
+   * secondary region replicas to keep up to date with
+   * the primary region files.
+   * @throws IOException
+   */
   public void refreshStoreFiles(Collection<String> newFiles) throws IOException {
     List<StoreFileInfo> storeFiles = new ArrayList<StoreFileInfo>(newFiles.size());
     for (String file : newFiles) {
@@ -624,7 +623,11 @@ public class HStore implements Store {
     return storeFile;
   }

-  @Override
+  /**
+   * Adds a value to the memstore
+   * @param cell
+   * @return memstore size delta
+   */
   public long add(final Cell cell) {
     lock.readLock().lock();
     try {
@@ -634,7 +637,11 @@ public class HStore implements Store {
     }
   }

-  @Override
+  /**
+   * Adds the specified value to the memstore
+   * @param cells
+   * @return memstore size delta
+   */
   public long add(final Iterable<Cell> cells) {
     lock.readLock().lock();
     try {
@@ -672,7 +679,10 @@ public class HStore implements Store {
     return this.storeEngine.getStoreFileManager().getStorefiles();
   }

-  @Override
+  /**
+   * This throws a WrongRegionException if the HFile does not fit in this region, or an
+   * InvalidHFileException if the HFile is not valid.
+   */
   public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
     HFile.Reader reader = null;
     try {
@@ -743,7 +753,13 @@ public class HStore implements Store {
     }
   }

-  @Override
+  /**
+   * This method should only be called from Region. It is assumed that the ranges of values in the
+   * HFile fit within the store's assigned region. (assertBulkLoadHFileOk checks this)
+   *
+   * @param srcPathStr
+   * @param seqNum sequence Id associated with the HFile
+   */
   public Path bulkLoadHFile(String srcPathStr, long seqNum) throws IOException {
     Path srcPath = new Path(srcPathStr);
     Path dstPath = fs.bulkLoadStoreFile(getColumnFamilyName(), srcPath, seqNum);
@@ -760,7 +776,6 @@ public class HStore implements Store {
     return dstPath;
   }

-  @Override
   public void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException {
     StoreFile sf = createStoreFileAndReader(fileInfo);
     bulkLoadHFile(sf);
@@ -792,7 +807,12 @@ public class HStore implements Store {
     }
   }

-  @Override
+  /**
+   * Close all the readers. We don't need to worry about subsequent requests because the Region
+   * holds a write lock that will prevent any more reads or writes.
+   * @return the {@link StoreFile StoreFiles} that were previously being used.
+   * @throws IOException on failure
+   */
   public ImmutableCollection<StoreFile> close() throws IOException {
     this.lock.writeLock().lock();
     try {
@@ -947,7 +967,13 @@ public class HStore implements Store {
     return sf;
   }

-  @Override
+  /**
+   * @param maxKeyCount
+   * @param compression Compression algorithm to use
+   * @param isCompaction whether we are creating a new file in a compaction
+   * @param includeMVCCReadpoint whether we should output the MVCC readpoint
+   * @return Writer for a new StoreFile in the tmp dir.
+   */
   public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
       boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag)
       throws IOException {
@@ -956,15 +982,14 @@ public class HStore implements Store {
       includesTag, false);
   }

-  /*
+  /**
    * @param maxKeyCount
    * @param compression Compression algorithm to use
    * @param isCompaction whether we are creating a new file in a compaction
-   * @param includesMVCCReadPoint - whether to include MVCC or not
-   * @param includesTag - includesTag or not
+   * @param includeMVCCReadpoint whether we should output the MVCC readpoint
+   * @param shouldDropBehind should the writer drop caches behind writes
    * @return Writer for a new StoreFile in the tmp dir.
    */
-  @Override
   public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
       boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag,
       boolean shouldDropBehind)
@@ -973,17 +998,17 @@ public class HStore implements Store {
       includesTag, shouldDropBehind, null);
   }

-  /*
+  /**
    * @param maxKeyCount
    * @param compression Compression algorithm to use
    * @param isCompaction whether we are creating a new file in a compaction
-   * @param includesMVCCReadPoint - whether to include MVCC or not
-   * @param includesTag - includesTag or not
+   * @param includeMVCCReadpoint whether we should output the MVCC readpoint
+   * @param shouldDropBehind should the writer drop caches behind writes
+   * @param trt Ready-made timetracker to use.
    * @return Writer for a new StoreFile in the tmp dir.
    */
   // TODO : allow the Writer factory to create Writers of ShipperListener type only in case of
   // compaction
-  @Override
   public StoreFileWriter createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
       boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag,
       boolean shouldDropBehind, final TimeRangeTracker trt)
@@ -1396,13 +1421,14 @@ public class HStore implements Store {
   /**
    * Call to complete a compaction. Its for the case where we find in the WAL a compaction
    * that was not finished. We could find one recovering a WAL after a regionserver crash.
    * See HBASE-2231.
-   * @param compaction
+   * @param compaction the descriptor for compaction
+   * @param pickCompactionFiles whether or not to pick up the new compaction output files and
+   * add them to the store
+   * @param removeFiles whether to remove/archive files from filesystem
    */
-  @Override
-  public void replayCompactionMarker(CompactionDescriptor compaction,
-      boolean pickCompactionFiles, boolean removeFiles)
-      throws IOException {
+  public void replayCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles,
+      boolean removeFiles) throws IOException {
     LOG.debug("Completing compaction from the WAL marker");
     List<String> compactionInputs = compaction.getCompactionInputList();
     List<String> compactionOutputs = Lists.newArrayList(compaction.getCompactionOutputList());
@@ -2073,7 +2099,19 @@ public class HStore implements Store {
     }
   }

-  @Override
+  /**
+   * Adds or replaces the specified KeyValues.
+   * <p>
+   * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in
+   * MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore.
+   * <p>
+   * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic
+   * across all of them.
+   * @param cells
+   * @param readpoint readpoint below which we can safely remove duplicate KVs
+   * @return memstore size delta
+   * @throws IOException
+   */
   public long upsert(Iterable<Cell> cells, long readpoint) throws IOException {
     this.lock.readLock().lock();
     try {
@@ -2083,7 +2121,6 @@ public class HStore implements Store {
     }
   }

-  @Override
   public StoreFlushContext createFlushContext(long cacheFlushId) {
     return new StoreFlusherImpl(cacheFlushId);
   }
@@ -2432,12 +2469,18 @@ public class HStore implements Store {
     }
   }

-  @Override public void finalizeFlush() {
+  /**
+   * This method is called when it is clear that the flush to disk is completed.
+   * The store may do any post-flush actions at this point.
+   * One example is to update the wal with sequence number that is known only at the store level.
+   */
+  public void finalizeFlush() {
     memstore.finalizeFlush();
   }

-  @Override public MemStore getMemStore() {
-    return memstore;
+  @Override
+  public boolean isSloppyMemstore() {
+    return this.memstore.isSloppy();
   }

   private void clearCompactedfiles(final List<StoreFile> filesToRemove) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
index 01e6d06..76e0e6ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;

 import java.io.Closeable;
 import java.io.IOException;
-import java.util.Map;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
@@ -88,11 +87,10 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable

   @Override
   public long getNumStores() {
-    Map<byte[], Store> stores = this.region.stores;
-    if (stores == null) {
+    if (this.region.stores == null) {
       return 0;
     }
-    return stores.size();
+    return this.region.stores.size();
   }

   @Override
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 7159502..388bcc0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -23,8 +23,6 @@ import java.util.List;
 import java.util.NavigableSet;

 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -35,10 +33,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
 import org.apache.hadoop.hbase.io.HeapSize;
-import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
@@ -65,14 +61,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   Collection<StoreFile> getStorefiles();

   /**
-   * Close all the readers We don't need to worry about subsequent requests because the Region
-   * holds a write lock that will prevent any more reads or writes.
-   * @return the {@link StoreFile StoreFiles} that were previously being used.
-   * @throws IOException on failure
-   */
-  Collection<StoreFile> close() throws IOException;
-
-  /**
    * Return a scanner for both the memstore and the HStore files. Assumes we are not in a
    * compaction.
    * @param scan Scan to apply when scanning the stores
@@ -129,93 +117,12 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   ScanInfo getScanInfo();

   /**
-   * Adds or replaces the specified KeyValues.
-   * <p>
-   * For each KeyValue specified, if a cell with the same row, family, and qualifier exists in
-   * MemStore, it will be replaced. Otherwise, it will just be inserted to MemStore.
-   * <p>
-   * This operation is atomic on each KeyValue (row/family/qualifier) but not necessarily atomic
-   * across all of them.
-   * @param cells
-   * @param readpoint readpoint below which we can safely remove duplicate KVs
-   * @return memstore size delta
-   * @throws IOException
-   */
-  long upsert(Iterable<Cell> cells, long readpoint) throws IOException;
-
-  /**
-   * Adds a value to the memstore
-   * @param cell
-   * @return memstore size delta
-   */
-  long add(Cell cell);
-
-  /**
-   * Adds the specified value to the memstore
-   * @param cells
-   * @return memstore size delta
-   */
-  long add(Iterable<Cell> cells);
-
-  /**
    * When was the last edit done in the memstore
    */
   long timeOfOldestEdit();

   FileSystem getFileSystem();
-
-  /**
-   * @param maxKeyCount
-   * @param compression Compression algorithm to use
-   * @param isCompaction whether we are creating a new file in a compaction
-   * @param includeMVCCReadpoint whether we should out the MVCC readpoint
-   * @return Writer for a new StoreFile in the tmp dir.
-   */
-  StoreFileWriter createWriterInTmp(
-    long maxKeyCount,
-    Compression.Algorithm compression,
-    boolean isCompaction,
-    boolean includeMVCCReadpoint,
-    boolean includesTags
-  ) throws IOException;
-
-  /**
-   * @param maxKeyCount
-   * @param compression Compression algorithm to use
-   * @param isCompaction whether we are creating a new file in a compaction
-   * @param includeMVCCReadpoint whether we should out the MVCC readpoint
-   * @param shouldDropBehind should the writer drop caches behind writes
-   * @return Writer for a new StoreFile in the tmp dir.
-   */
-  StoreFileWriter createWriterInTmp(
-    long maxKeyCount,
-    Compression.Algorithm compression,
-    boolean isCompaction,
-    boolean includeMVCCReadpoint,
-    boolean includesTags,
-    boolean shouldDropBehind
-  ) throws IOException;
-
-  /**
-   * @param maxKeyCount
-   * @param compression Compression algorithm to use
-   * @param isCompaction whether we are creating a new file in a compaction
-   * @param includeMVCCReadpoint whether we should out the MVCC readpoint
-   * @param shouldDropBehind should the writer drop caches behind writes
-   * @param trt Ready-made timetracker to use.
-   * @return Writer for a new StoreFile in the tmp dir.
-   */
-  StoreFileWriter createWriterInTmp(
-    long maxKeyCount,
-    Compression.Algorithm compression,
-    boolean isCompaction,
-    boolean includeMVCCReadpoint,
-    boolean includesTags,
-    boolean shouldDropBehind,
-    final TimeRangeTracker trt
-  ) throws IOException;

   // Compaction oriented methods

   boolean throttleCompaction(long compactionSize);
@@ -265,21 +172,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   int getCompactPriority();

-  StoreFlushContext createFlushContext(long cacheFlushId);
-
-  /**
-   * Call to complete a compaction. Its for the case where we find in the WAL a compaction
-   * that was not finished. We could find one recovering a WAL after a regionserver crash.
-   * See HBASE-2331.
-   * @param compaction the descriptor for compaction
-   * @param pickCompactionFiles whether or not pick up the new compaction output files and
-   * add it to the store
-   * @param removeFiles whether to remove/archive files from filesystem
-   */
-  void replayCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles,
-      boolean removeFiles)
-      throws IOException;
-
   // Split oriented methods

   boolean canSplit();
@@ -290,23 +182,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
    */
   byte[] getSplitPoint();

-  // Bulk Load methods
-
-  /**
-   * This throws a WrongRegionException if the HFile does not fit in this region, or an
-   * InvalidHFileException if the HFile is not valid.
-   */
-  void assertBulkLoadHFileOk(Path srcPath) throws IOException;
-
-  /**
-   * This method should only be called from Region. It is assumed that the ranges of values in the
-   * HFile fit within the stores assigned region. (assertBulkLoadHFileOk checks this)
-   *
-   * @param srcPathStr
-   * @param sequenceId sequence Id associated with the HFile
-   */
-  Path bulkLoadHFile(String srcPathStr, long sequenceId) throws IOException;
-
   // General accessors into the state of the store
   // TODO abstract some of this out into a metrics class

@@ -495,15 +370,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   boolean hasTooManyStoreFiles();

   /**
-   * Checks the underlying store files, and opens the files that have not
-   * been opened, and removes the store file readers for store files no longer
-   * available. Mainly used by secondary region replicas to keep up to date with
-   * the primary region files.
-   * @throws IOException
-   */
-  void refreshStoreFiles() throws IOException;
-
-  /**
    * This value can represent the degree of emergency of compaction for this store. It should be
    * greater than or equal to 0.0, any value greater than 1.0 means we have too many store files.
    * <p>
@@ -520,16 +386,6 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
    */
   double getCompactionPressure();

-  /**
-   * Replaces the store files that the store has with the given files. Mainly used by
-   * secondary region replicas to keep up to date with
-   * the primary region files.
-   * @throws IOException
-   */
-  void refreshStoreFiles(Collection<String> newFiles) throws IOException;
-
-  void bulkLoadHFile(StoreFileInfo fileInfo) throws IOException;
-
   boolean isPrimaryReplicaStore();

   /**
@@ -538,11 +394,7 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
   void closeAndArchiveCompactedFiles() throws IOException;

   /**
-   * This method is called when it is clear that the flush to disk is completed.
-   * The store may do any post-flush actions at this point.
-   * One example is to update the wal with sequence number that is known only at the store level.
+   * @return true if the memstore may need some extra memory space
    */
-  void finalizeFlush();
-
-  MemStore getMemStore();
+  boolean isSloppyMemstore();
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
index a2a0dcc..951e28f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StorefileRefresherChore.java
@@ -102,7 +102,7 @@ public class StorefileRefresherChore extends ScheduledChore {
         // TODO: some stores might see new data from flush, while others do not which
         // MIGHT break atomic edits across column families. We can fix this with setting
         // mvcc read numbers that we know every store has seen
-        store.refreshStoreFiles();
+        ((HStore) store).refreshStoreFiles();
       }
     } catch (IOException ex) {
       LOG.warn("Exception while trying to refresh store files for region:" + r.getRegionInfo()
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
index 22c3ce7..ada0e94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFlusher.java
@@ -109,7 +109,7 @@ public class StripeStoreFlusher extends StoreFlusher {
     return new StripeMultiFileWriter.WriterFactory() {
       @Override
       public StoreFileWriter createWriter() throws IOException {
-        StoreFileWriter writer = store.createWriterInTmp(
+        StoreFileWriter writer = ((HStore) store).createWriterInTmp(
             kvCount, store.getFamily().getCompressionType(),
             /* isCompaction = */ false,
             /* includeMVCCReadpoint = */ true,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index f4bd9a8..5bbf14c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -253,7 +253,7 @@ public abstract class Compactor {
       throws IOException {
     // When all MVCC readpoints are 0, don't write them.
     // See HBASE-8166, HBASE-12600, and HBASE-13389.
-    return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression,
+    return ((HStore) store).createWriterInTmp(fd.maxKeyCount, this.compactionCompression,
         /* isCompaction = */true,
         /* includeMVCCReadpoint = */fd.maxMVCCReadpoint > 0,
         /* includesTags = */fd.maxTagsLength > 0, shouldDropBehind);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
index a0dd7ca..01d3f5f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestIOFencing.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
-import org.apache.hadoop.hbase.regionserver.MemStore;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.Store;
@@ -209,10 +208,6 @@ public class TestIOFencing {
     @Override
     public void finalizeFlush() {
     }
-
-    @Override public MemStore getMemStore() {
-      return null;
-    }
   }

   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index fdc6c92..f94d366 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueTestUtil;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
@@ -959,12 +960,10 @@ public class TestDefaultMemStore {
     HBaseTestingUtility hbaseUtility = HBaseTestingUtility.createLocalHTU(conf);
     HRegion region = hbaseUtility.createTestRegion("foobar", new HColumnDescriptor("foo"));

-    List<Store> stores = region.getStores();
-    assertTrue(stores.size() == 1);
-
-    Store s = stores.iterator().next();
     edge.setCurrentTimeMillis(1234);
-    s.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
+    Put p = new Put(Bytes.toBytes("r"));
+    p.add(KeyValueTestUtil.create("r", "f", "q", 100, "v"));
+    region.put(p);
     edge.setCurrentTimeMillis(1234 + 100);
     StringBuffer sb = new StringBuffer();
     assertTrue(!region.shouldFlush(sb));
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index 612d6cf..f06742b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -298,7 +298,7 @@ public class TestHRegion {
     put.addColumn(COLUMN_FAMILY_BYTES, null, value);
     // First put something in current memstore, which will be in snapshot after flusher.prepare()
     region.put(put);
-    StoreFlushContext storeFlushCtx = store.createFlushContext(12345);
+    StoreFlushContext storeFlushCtx = ((HStore) store).createFlushContext(12345);
     storeFlushCtx.prepare();
     // Second put something in current memstore
     put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
@@ -342,7 +342,7 @@ public class TestHRegion {
     Store store = region.getStore(COLUMN_FAMILY_BYTES);
     // Get some random bytes.
     byte [] value = Bytes.toBytes(name.getMethodName());
-    faultyLog.setStoreFlushCtx(store.createFlushContext(12345));
+    faultyLog.setStoreFlushCtx(((HStore) store).createFlushContext(12345));

     Put put = new Put(value);
     put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
@@ -572,7 +572,7 @@ public class TestHRegion {
     region.put(p1);
     // Manufacture an outstanding snapshot -- fake a failed flush by doing prepare step only.
     Store store = region.getStore(COLUMN_FAMILY_BYTES);
-    StoreFlushContext storeFlushCtx = store.createFlushContext(12345);
+    StoreFlushContext storeFlushCtx = ((HStore) store).createFlushContext(12345);
     storeFlushCtx.prepare();
     // Now add two entries to the foreground memstore.
     Put p2 = new Put(row);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
index 9166101..727ddf3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicas.java
@@ -448,7 +448,7 @@ public class TestRegionReplicas {

       // Refresh store files on the secondary
       Region secondaryRegion = getRS().getFromOnlineRegions(hriSecondary.getEncodedName());
-      secondaryRegion.getStore(f).refreshStoreFiles();
+      ((HStore) secondaryRegion.getStore(f)).refreshStoreFiles();
       Assert.assertEquals(3, secondaryRegion.getStore(f).getStorefilesCount());

       // force compaction
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 74826b0..3405b49 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -171,11 +171,11 @@ public class TestWalAndCompactingMemStoreFlush {
     String s = "\n\n----------------------------------\n"
         + "Upon initial insert and before any flush, size of CF1 is:"
         + cf1MemstoreSizePhaseI + ", is CF1 compacted memstore?:"
-        + region.getStore(FAMILY1).getMemStore().isSloppy() + ". Size of CF2 is:"
+        + region.getStore(FAMILY1).isSloppyMemstore() + ". Size of CF2 is:"
         + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:"
-        + region.getStore(FAMILY2).getMemStore().isSloppy() + ". Size of CF3 is:"
+        + region.getStore(FAMILY2).isSloppyMemstore() + ". Size of CF3 is:"
         + cf3MemstoreSizePhaseI + ", is CF3 compacted memstore?:"
-        + region.getStore(FAMILY3).getMemStore().isSloppy() + "\n";
+        + region.getStore(FAMILY3).isSloppyMemstore() + "\n";

     // The overall smallest LSN in the region's memstores should be the same as
     // the LSN of the smallest edit in CF1
@@ -208,8 +208,10 @@ public class TestWalAndCompactingMemStoreFlush {
     // Since CF1 and CF3 should be flushed to memory (not to disk),
     // CF2 is going to be flushed to disk.
     // CF1 - nothing to compact (but flattening), CF3 - should be twice compacted
-    ((CompactingMemStore) region.getStore(FAMILY1).getMemStore()).flushInMemory();
-    ((CompactingMemStore) region.getStore(FAMILY3).getMemStore()).flushInMemory();
+    CompactingMemStore cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
+    CompactingMemStore cms3 = (CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore;
+    cms1.flushInMemory();
+    cms3.flushInMemory();
     region.flush(false);

     // Recalculate everything
@@ -423,11 +425,11 @@ public class TestWalAndCompactingMemStoreFlush {
     String s = "\n\n----------------------------------\n"
         + "Upon initial insert and before any flush, size of CF1 is:"
         + cf1MemstoreSizePhaseI + ", is CF1 compacted memstore?:"
-        + region.getStore(FAMILY1).getMemStore().isSloppy() + ". Size of CF2 is:"
+        + region.getStore(FAMILY1).isSloppyMemstore() + ". Size of CF2 is:"
         + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:"
-        + region.getStore(FAMILY2).getMemStore().isSloppy() + ". Size of CF3 is:"
+        + region.getStore(FAMILY2).isSloppyMemstore() + ". Size of CF3 is:"
         + cf3MemstoreSizePhaseI + ", is CF3 compacted memstore?:"
-        + region.getStore(FAMILY3).getMemStore().isSloppy() + "\n";
+        + region.getStore(FAMILY3).isSloppyMemstore() + "\n";

     // The overall smallest LSN in the region's memstores should be the same as
     // the LSN of the smallest edit in CF1
@@ -459,8 +461,10 @@ public class TestWalAndCompactingMemStoreFlush {
     // Since CF1 and CF3 should be flushed to memory (not to disk),
     // CF2 is going to be flushed to disk.
     // CF1 - nothing to compact, CF3 - should be twice compacted
-    ((CompactingMemStore) region.getStore(FAMILY1).getMemStore()).flushInMemory();
-    ((CompactingMemStore) region.getStore(FAMILY3).getMemStore()).flushInMemory();
+    CompactingMemStore cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
+    CompactingMemStore cms3 = (CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore;
+    cms1.flushInMemory();
+    cms3.flushInMemory();
     region.flush(false);

     // Recalculate everything
@@ -670,8 +674,10 @@ public class TestWalAndCompactingMemStoreFlush {
         cf1MemstoreSizePhaseI + cf2MemstoreSizePhaseI + cf3MemstoreSizePhaseI);

     // Flush!
-    ((CompactingMemStore) region.getStore(FAMILY1).getMemStore()).flushInMemory();
-    ((CompactingMemStore) region.getStore(FAMILY3).getMemStore()).flushInMemory();
+    CompactingMemStore cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
+    CompactingMemStore cms3 = (CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore;
+    cms1.flushInMemory();
+    cms3.flushInMemory();
     region.flush(false);

     long cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getMemStoreSize();
@@ -720,8 +726,10 @@ public class TestWalAndCompactingMemStoreFlush {
         + smallestSeqCF2PhaseIII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIII + "\n";

     // Flush!
-    ((CompactingMemStore) region.getStore(FAMILY1).getMemStore()).flushInMemory();
-    ((CompactingMemStore) region.getStore(FAMILY3).getMemStore()).flushInMemory();
+    cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
+    cms3 = (CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore;
+    cms1.flushInMemory();
+    cms3.flushInMemory();
     region.flush(false);

     long smallestSeqInRegionCurrentMemstorePhaseIV =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
index 38d9f99..e566955 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestDateTieredCompactor.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
@@ -93,15 +94,15 @@ public class TestDateTieredCompactor {
     // Create store mock that is satisfactory for compactor.
     HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
     ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
-    final Store store = mock(Store.class);
+    final HStore store = mock(HStore.class);
     when(store.getStorefiles()).thenReturn(storefiles);
     when(store.getFamily()).thenReturn(col);
     when(store.getScanInfo()).thenReturn(si);
     when(store.areWritesEnabled()).thenReturn(true);
     when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
     when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
-    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
-        anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
+    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class),
+        anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
     when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);
     long maxSequenceId = StoreFile.getMaxSequenceIdInList(storefiles);
     when(store.getMaxSequenceId()).thenReturn(maxSequenceId);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
index fa6e62a..6a5b6f7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
@@ -771,14 +772,13 @@ public class TestStripeCompactionPolicy {
   private StripeCompactor createCompactor() throws Exception {
     HColumnDescriptor col = new HColumnDescriptor(Bytes.toBytes("foo"));
     StoreFileWritersCapture writers = new StoreFileWritersCapture();
-    Store store = mock(Store.class);
+    HStore store = mock(HStore.class);
     HRegionInfo info = mock(HRegionInfo.class);
     when(info.getRegionNameAsString()).thenReturn("testRegion");
     when(store.getFamily()).thenReturn(col);
     when(store.getRegionInfo()).thenReturn(info);
-    when(
-      store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
-        anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
+    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class),
+        anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);

     Configuration conf = HBaseConfiguration.create();
     conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
index 97331e8..0603fcb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.io.compress.Compression;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.ScanInfo;
 import org.apache.hadoop.hbase.regionserver.ScanType;
@@ -195,14 +196,14 @@ public class TestStripeCompactor {
     // Create store mock that is satisfactory for compactor.
     HColumnDescriptor col = new HColumnDescriptor(NAME_OF_THINGS);
     ScanInfo si = new ScanInfo(conf, col, Long.MAX_VALUE, 0, CellComparator.COMPARATOR);
-    Store store = mock(Store.class);
+    HStore store = mock(HStore.class);
     when(store.getFamily()).thenReturn(col);
     when(store.getScanInfo()).thenReturn(si);
     when(store.areWritesEnabled()).thenReturn(true);
     when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
     when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
-    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class), anyBoolean(),
-        anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
+    when(store.createWriterInTmp(anyLong(), any(Compression.Algorithm.class),
+        anyBoolean(), anyBoolean(), anyBoolean(), anyBoolean())).thenAnswer(writers);
     when(store.getComparator()).thenReturn(CellComparator.COMPARATOR);

     return new StripeCompactor(conf, store) {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
index d9087a6..d3bbaa5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestWALReplay.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.regionserver.FlushRequestListener;
 import org.apache.hadoop.hbase.regionserver.FlushRequester;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.MemStoreSnapshot;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -95,8 +96,6 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileTestUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
-import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
-import org.apache.hadoop.hbase.wal.FSHLogProvider;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKey;
@@ -561,7 +560,7 @@ public abstract class AbstractTestWALReplay {
     final AtomicInteger countOfRestoredEdits = new AtomicInteger(0);
     HRegion region3 = new HRegion(basedir, wal3, newFS, newConf, hri, htd, null) {
       @Override
-      protected boolean restoreEdit(Store s, Cell cell) {
+      protected boolean restoreEdit(HStore s, Cell cell) {
         boolean b = super.restoreEdit(s, cell);
         countOfRestoredEdits.incrementAndGet();
         return b;
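[Reviewer note, not part of the patch: the memstore-accessor migration that recurs throughout the test changes above reduces, for callers, to the one-liner sketched below. The class and method names around it are hypothetical; isSloppyMemstore and the removed getMemStore are exactly the methods visible in this diff.]

    import org.apache.hadoop.hbase.regionserver.Store;

    final class SloppyMemstoreSketch {
      // Before this patch: store.getMemStore() != null && store.getMemStore().isSloppy()
      // After: Store answers the question directly, and MemStore stays encapsulated in HStore.
      static boolean needsExtraMemorySpace(Store store) {
        return store.isSloppyMemstore();
      }
    }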