diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
new file mode 100644
index 0000000..aad2bac
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
@@ -0,0 +1,132 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.KVComparator;
+
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
+/**
+ * Default implementation of StoreFileManager. Not thread-safe.
+ */
+@InterfaceAudience.Private
+class DefaultStoreFileManager implements StoreFileManager {
+  static final Log LOG = LogFactory.getLog(DefaultStoreFileManager.class);
+
+  private final KVComparator kvComparator;
+
+  /**
+   * List of store files inside this store. This is an immutable list that
+   * is atomically replaced when its contents change.
+   */
+  private volatile ImmutableList<StoreFile> storefiles = null;
+
+  public DefaultStoreFileManager(KVComparator kvComparator) {
+    this.kvComparator = kvComparator;
+  }
+
+  @Override
+  public void loadFiles(List<StoreFile> storeFiles) {
+    sortAndSetStoreFiles(storeFiles);
+  }
+
+  @Override
+  public final Collection<StoreFile> getStorefiles() {
+    return storefiles;
+  }
+
+  @Override
+  public void insertNewFile(StoreFile sf) {
+    ArrayList<StoreFile> newFiles = new ArrayList<StoreFile>(storefiles);
+    newFiles.add(sf);
+    sortAndSetStoreFiles(newFiles);
+  }
+
+  @Override
+  public ImmutableCollection<StoreFile> clearFiles() {
+    ImmutableList<StoreFile> result = storefiles;
+    storefiles = ImmutableList.of();
+    return result;
+  }
+
+  @Override
+  public final int getStorefileCount() {
+    return storefiles.size();
+  }
+
+  @Override
+  public void addCompactionResults(
+      Collection<StoreFile> compactedFiles, Collection<StoreFile> results) {
+    ArrayList<StoreFile> newStoreFiles = Lists.newArrayList(storefiles);
+    newStoreFiles.removeAll(compactedFiles);
+    if (!results.isEmpty()) {
+      newStoreFiles.addAll(results);
+    }
+    sortAndSetStoreFiles(newStoreFiles);
+  }
+
+  @Override
+  public final Iterator<StoreFile> getCandidateFilesForRowKeyBefore(final KeyValue targetKey) {
+    return new ArrayList<StoreFile>(Lists.reverse(this.storefiles)).iterator();
+  }
+
+  @Override
+  public Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
+      Iterator<StoreFile> candidateFiles, final KeyValue targetKey, final KeyValue candidate) {
+    // Default store has nothing useful to do here.
+    // TODO: move this comment when implementing Level:
+    //   Level store can trim the list by range, removing all the files which cannot have
+    //   any useful candidates less than "candidate".
+    return candidateFiles;
+  }
+
+  @Override
+  public final byte[] getSplitPoint() throws IOException {
+    if (this.storefiles.isEmpty()) {
+      return null;
+    }
+    return StoreUtils.getLargestFile(this.storefiles).getFileSplitPoint(this.kvComparator);
+  }
+
+  @Override
+  public final Collection<StoreFile> getFilesForScanOrGet(boolean isGet,
+      byte[] startRow, byte[] stopRow) {
+    // We cannot provide any useful input and already have the files sorted by seqNum.
+    return getStorefiles();
+  }
+
+  private void sortAndSetStoreFiles(List<StoreFile> storeFiles) {
+    Collections.sort(storeFiles, StoreFile.Comparators.SEQ_ID);
+    storefiles = ImmutableList.copyOf(storeFiles);
+  }
+}
+
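For readers new to the pattern above: DefaultStoreFileManager publishes an immutable snapshot through a volatile field, so readers never take a lock while writers (already serialized by HStore's write lock) rebuild and swap the whole list. A minimal standalone sketch of the idiom follows; String stands in for StoreFile, and the class is illustrative only, not patch code.

  import java.util.ArrayList;
  import java.util.Collections;
  import java.util.List;

  import com.google.common.collect.ImmutableList;

  // Copy-on-write holder, analogous to DefaultStoreFileManager.storefiles.
  class CowFileList {
    // volatile gives safe publication; readers see a consistent snapshot without locking.
    private volatile ImmutableList<String> files = ImmutableList.of();

    // Writers are assumed to be externally serialized, as HStore's write lock does.
    void add(String file) {
      List<String> copy = new ArrayList<String>(files);
      copy.add(file);
      Collections.sort(copy);             // keep the canonical order (SEQ_ID in the patch)
      files = ImmutableList.copyOf(copy); // atomic swap of the snapshot
    }

    ImmutableList<String> snapshot() {
      return files; // callers may iterate freely; the list is never mutated in place
    }
  }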
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 474bcfb..66ade2b 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -678,10 +678,7 @@ public class HRegion implements HeapSize { // , Writable{
    */
  public boolean hasReferences() {
    for (Store store : this.stores.values()) {
-      for (StoreFile sf : store.getStorefiles()) {
-        // Found a reference, return.
-        if (sf.isReference()) return true;
-      }
+      if (store.hasReferences()) return true;
    }
    return false;
  }
@@ -1033,24 +1030,22 @@
      ThreadPoolExecutor storeCloserThreadPool =
        getStoreOpenAndCloseThreadPool("StoreCloserThread-"
          + this.regionInfo.getRegionNameAsString());
-      CompletionService<ImmutableList<StoreFile>> completionService =
-        new ExecutorCompletionService<ImmutableList<StoreFile>>(
-          storeCloserThreadPool);
+      CompletionService<Collection<StoreFile>> completionService =
+        new ExecutorCompletionService<Collection<StoreFile>>(storeCloserThreadPool);

      // close each store in parallel
      for (final Store store : stores.values()) {
        completionService
-            .submit(new Callable<ImmutableList<StoreFile>>() {
-              public ImmutableList<StoreFile> call() throws IOException {
+            .submit(new Callable<Collection<StoreFile>>() {
+              public Collection<StoreFile> call() throws IOException {
                return store.close();
              }
            });
      }
      try {
        for (int i = 0; i < stores.size(); i++) {
-          Future<ImmutableList<StoreFile>> future = completionService
-              .take();
-          ImmutableList<StoreFile> storeFileList = future.get();
+          Future<Collection<StoreFile>> future = completionService.take();
+          Collection<StoreFile> storeFileList = future.get();
          result.addAll(storeFileList);
        }
      } catch (InterruptedException e) {
@@ -3049,8 +3044,7 @@ public class HRegion implements HeapSize { // , Writable{
        throw new IllegalArgumentException("No column family : " +
            new String(column) + " available");
      }
-      List<StoreFile> storeFiles = store.getStorefiles();
-      for (StoreFile storeFile: storeFiles) {
+      for (StoreFile storeFile: store.getStorefiles()) {
        storeFileNames.add(storeFile.getPath().toString());
      }
    }
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 7b8d74a..75e5ee3 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -23,6 +23,7 @@ import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 import java.util.NavigableSet;
 import java.util.SortedSet;
@@ -78,6 +79,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.util.StringUtils;

 import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableCollection;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;

@@ -132,12 +134,7 @@ public class HStore implements Store {

  private ScanInfo scanInfo;

-  /*
-   * List of store files inside this store. This is an immutable list that
-   * is atomically replaced when its contents change.
-   */
-  private volatile ImmutableList<StoreFile> storefiles = null;
-
+  private StoreFileManager storeFileManager;
  final List<StoreFile> filesCompacting = Lists.newArrayList();

  // All access must be synchronized.
@@ -221,7 +218,9 @@ public class HStore implements Store {
      HStore.closeCheckInterval = conf.getInt(
          "hbase.hstore.close.check.interval", 10*1000*1000 /* 10 MB */);
    }
-    this.storefiles = sortAndClone(loadStoreFiles());
+
+    this.storeFileManager = new DefaultStoreFileManager(this.comparator);
+    this.storeFileManager.loadFiles(loadStoreFiles());

    // Initialize checksum type from name. The names are CRC32, CRC32C, etc.
    this.checksumType = getChecksumType(conf);
@@ -341,7 +340,7 @@ public class HStore implements Store {
  }

  /**
-   * @return The maximum sequence id in all store files.
+   * @return The maximum sequence id in all store files. Used for log replay.
   */
  long getMaxSequenceId(boolean includeBulkFiles) {
    return StoreFile.getMaxSequenceIdInList(this.getStorefiles(), includeBulkFiles);
  }
@@ -529,8 +528,8 @@
   * @return All store files.
   */
  @Override
-  public List<StoreFile> getStorefiles() {
-    return this.storefiles;
+  public Collection<StoreFile> getStorefiles() {
+    return this.storeFileManager.getStorefiles();
  }

  @Override
@@ -633,11 +632,9 @@
    // Append the new storefile into the list
    this.lock.writeLock().lock();
    try {
-      ArrayList<StoreFile> newFiles = new ArrayList<StoreFile>(storefiles);
-      newFiles.add(sf);
-      this.storefiles = sortAndClone(newFiles);
+      this.storeFileManager.insertNewFile(sf);
    } finally {
-      // We need the lock, as long as we are updating the storefiles
+      // We need the lock, as long as we are updating the storeFiles
      // or changing the memstore. Let us release it before calling
      // notifyChangeReadersObservers. See HBASE-4485 for a possible
      // deadlock scenario that could have happened if continue to hold
@@ -660,13 +657,11 @@
  }

  @Override
-  public ImmutableList<StoreFile> close() throws IOException {
+  public ImmutableCollection<StoreFile> close() throws IOException {
    this.lock.writeLock().lock();
    try {
-      ImmutableList<StoreFile> result = storefiles;
-
      // Clear so metrics doesn't find them.
-      storefiles = ImmutableList.of();
+      ImmutableCollection<StoreFile> result = storeFileManager.clearFiles();

      if (!result.isEmpty()) {
        // initialize the thread pool for closing store files in parallel.
@@ -963,7 +958,7 @@
  }

  /*
-   * Change storefiles adding into place the Reader produced by this new flush.
+   * Change storeFiles adding into place the Reader produced by this new flush.
   * @param sf
   * @param set That was used to make the passed file <code>p</code>.
   * @throws IOException
@@ -974,13 +969,10 @@
      throws IOException {
    this.lock.writeLock().lock();
    try {
-      ArrayList<StoreFile> newList = new ArrayList<StoreFile>(storefiles);
-      newList.add(sf);
-      storefiles = sortAndClone(newList);
-
+      this.storeFileManager.insertNewFile(sf);
      this.memstore.clearSnapshot(set);
    } finally {
-      // We need the lock, as long as we are updating the storefiles
+      // We need the lock, as long as we are updating the storeFiles
      // or changing the memstore. Let us release it before calling
      // notifyChangeReadersObservers. See HBASE-4485 for a possible
      // deadlock scenario that could have happened if continue to hold
@@ -1010,14 +1002,13 @@
   * @return all scanners for this store
   */
  protected List<KeyValueScanner> getScanners(boolean cacheBlocks,
-      boolean isGet,
-      boolean isCompaction,
-      ScanQueryMatcher matcher) throws IOException {
-    List<StoreFile> storeFiles;
+      boolean isGet, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
+      byte[] stopRow) throws IOException {
+    Collection<StoreFile> storeFilesToScan;
    List<KeyValueScanner> memStoreScanners;
    this.lock.readLock().lock();
    try {
-      storeFiles = this.getStorefiles();
+      storeFilesToScan = this.storeFileManager.getFilesForScanOrGet(isGet, startRow, stopRow);
      memStoreScanners = this.memstore.getScanners();
    } finally {
      this.lock.readLock().unlock();
    }

    // First the store file scanners

    // TODO this used to get the store files in descending order,
    // but now we get them in ascending order, which I think is
    // actually more correct, since memstore get put at the end.
    List<StoreFileScanner> sfScanners = StoreFileScanner
-        .getScannersForStoreFiles(storeFiles, cacheBlocks, isGet, isCompaction, matcher);
+        .getScannersForStoreFiles(storeFilesToScan, cacheBlocks, isGet, isCompaction, matcher);

    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(sfScanners.size()+1);
    scanners.addAll(sfScanners);
@@ -1149,15 +1140,21 @@
    return sfs;
  }

-  @Override
-  public void compactRecentForTesting(int N) throws IOException {
+  /**
+   * This method tries to compact N recent files for testing.
+   * Note that because compacting "recent" files only makes sense for some policies,
+   * e.g. the default one, it assumes the default policy is used. It doesn't use the
+   * policy, but instead builds the compaction candidate list by itself.
+   * @param N Number of files.
+   */
+  public void compactRecentForTestingAssumingDefaultPolicy(int N) throws IOException {
    List<StoreFile> filesToCompact;
    boolean isMajor;

    this.lock.readLock().lock();
    try {
      synchronized (filesCompacting) {
-        filesToCompact = Lists.newArrayList(storefiles);
+        filesToCompact = Lists.newArrayList(storeFileManager.getStorefiles());
        if (!filesCompacting.isEmpty()) {
          // exclude all files older than the newest file we're currently
          // compacting. this allows us to preserve contiguity (HBASE-2856)
@@ -1172,7 +1169,7 @@
        }

        filesToCompact = filesToCompact.subList(count - N, count);
-        isMajor = (filesToCompact.size() == storefiles.size());
+        isMajor = (filesToCompact.size() == storeFileManager.getStorefileCount());
        filesCompacting.addAll(filesToCompact);
        Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);
      }
@@ -1200,7 +1197,7 @@

  @Override
  public boolean hasReferences() {
-    return StoreUtils.hasReferences(this.storefiles);
+    return StoreUtils.hasReferences(this.storeFileManager.getStorefiles());
  }

  @Override
@@ -1210,15 +1207,14 @@

  @Override
  public boolean isMajorCompaction() throws IOException {
-    for (StoreFile sf : this.storefiles) {
+    for (StoreFile sf : this.storeFileManager.getStorefiles()) {
+      // TODO: what are these reader checks all over the place?
      if (sf.getReader() == null) {
        LOG.debug("StoreFile " + sf + " has null Reader");
        return false;
      }
    }
-
-    List<StoreFile> candidates = new ArrayList<StoreFile>(this.storefiles);
-    return compactionPolicy.isMajorCompaction(candidates);
+    return compactionPolicy.isMajorCompaction(this.storeFileManager.getStorefiles());
  }

  public CompactionRequest requestCompaction() throws IOException {
@@ -1235,8 +1231,8 @@
    this.lock.readLock().lock();
    try {
      synchronized (filesCompacting) {
-        // candidates = all storefiles not already in compaction queue
-        List<StoreFile> candidates = Lists.newArrayList(storefiles);
+        // candidates = all StoreFiles not already in compaction queue
+        List<StoreFile> candidates = Lists.newArrayList(storeFileManager.getStorefiles());
        if (!filesCompacting.isEmpty()) {
          // exclude all files older than the newest file we're currently
          // compacting. this allows us to preserve contiguity (HBASE-2856)
@@ -1280,9 +1276,8 @@
    filesCompacting.addAll(filesToCompact.getFilesToCompact());
    Collections.sort(filesCompacting, StoreFile.Comparators.SEQ_ID);

-    // major compaction iff all StoreFiles are included
    boolean isMajor =
-        (filesToCompact.getFilesToCompact().size() == this.storefiles.size());
+        (filesToCompact.getFilesToCompact().size() == this.getStorefilesCount());
    if (isMajor) {
      // since we're enqueuing a major, update the compaction wait interval
      this.forceMajor = false;
@@ -1378,25 +1373,22 @@
          this.family.getBloomFilterType(), this.dataBlockEncoder);
      result.createReader();
    }
+
    try {
      this.lock.writeLock().lock();
      try {
-        // Change this.storefiles so it reflects new state but do not
+        // Change this.storeFiles so it reflects new state but do not
        // delete old store files until we have sent out notification of
        // change in case old files are still being accessed by outstanding
        // scanners.
-        ArrayList<StoreFile> newStoreFiles = Lists.newArrayList(storefiles);
-        newStoreFiles.removeAll(compactedFiles);
-        filesCompacting.removeAll(compactedFiles); // safe bc: lock.writeLock()
-
-        // If a StoreFile result, move it into place.  May be null.
+        List<StoreFile> results = new ArrayList<StoreFile>(1);
        if (result != null) {
-          newStoreFiles.add(result);
+          results.add(result);
        }
-
-        this.storefiles = sortAndClone(newStoreFiles);
+        this.storeFileManager.addCompactionResults(compactedFiles, results);
+        filesCompacting.removeAll(compactedFiles); // safe bc: lock.writeLock()
      } finally {
-        // We need the lock, as long as we are updating the storefiles
+        // We need the lock, as long as we are updating the storeFiles
        // or changing the memstore. Let us release it before calling
        // notifyChangeReadersObservers. See HBASE-4485 for a possible
        // deadlock scenario that could have happened if continue to hold
@@ -1423,7 +1415,7 @@
    // 4. Compute new store size
    this.storeSize = 0L;
    this.totalUncompressedBytes = 0L;
-    for (StoreFile hsf : this.storefiles) {
+    for (StoreFile hsf : this.storeFileManager.getStorefiles()) {
      StoreFile.Reader r = hsf.getReader();
      if (r == null) {
        LOG.warn("StoreFile " + hsf + " has a null Reader");
@@ -1435,21 +1427,6 @@
    return result;
  }

-  public ImmutableList<StoreFile> sortAndClone(List<StoreFile> storeFiles) {
-    Collections.sort(storeFiles, StoreFile.Comparators.SEQ_ID);
-    ImmutableList<StoreFile> newList = ImmutableList.copyOf(storeFiles);
-    return newList;
-  }
-
-  // ////////////////////////////////////////////////////////////////////////////
-  // Accessors.
-  // (This is the only section that is directly useful!)
-  //////////////////////////////////////////////////////////////////////////////
-  @Override
-  public int getNumberOfStoreFiles() {
-    return this.storefiles.size();
-  }
-
  /*
   * @param wantedVersions How many versions were asked for.
   * @return wantedVersions or this families' {@link HConstants#VERSIONS}.
@@ -1486,10 +1463,18 @@
      // First go to the memstore.  Pick up deletes and candidates.
      this.memstore.getRowKeyAtOrBefore(state);
      // Check if match, if we got a candidate on the asked for 'kv' row.
-      // Process each store file.  Run through from newest to oldest.
-      for (StoreFile sf : Lists.reverse(storefiles)) {
-        // Update the candidate keys from the current map file
-        rowAtOrBeforeFromStoreFile(sf, state);
+      // Process each relevant store file. Run through from newest to oldest.
+      Iterator<StoreFile> sfIterator =
+          this.storeFileManager.getCandidateFilesForRowKeyBefore(state.getTargetKey());
+      while (sfIterator.hasNext()) {
+        StoreFile sf = sfIterator.next();
+        sfIterator.remove(); // Remove sf from iterator.
+        boolean haveNewCandidate = rowAtOrBeforeFromStoreFile(sf, state);
+        if (haveNewCandidate) {
+          // TODO: we may have an optimization here which stops the search if we find exact match.
+          sfIterator = this.storeFileManager.updateCandidateFilesForRowKeyBefore(sfIterator,
+              state.getTargetKey(), state.getCandidate());
+        }
      }
      return state.getCandidate();
    } finally {
@@ -1502,22 +1487,23 @@
   * @param f
   * @param state
   * @throws IOException
+   * @return True iff the candidate has been updated in the state.
   */
-  private void rowAtOrBeforeFromStoreFile(final StoreFile f,
+  private boolean rowAtOrBeforeFromStoreFile(final StoreFile f,
                                       final GetClosestRowBeforeTracker state)
      throws IOException {
    StoreFile.Reader r = f.getReader();
    if (r == null) {
      LOG.warn("StoreFile " + f + " has a null Reader");
-      return;
+      return false;
    }
    if (r.getEntries() == 0) {
      LOG.warn("StoreFile " + f + " is a empty store file");
-      return;
+      return false;
    }
    // TODO: Cache these keys rather than make each time?
    byte [] fk = r.getFirstKey();
-    if (fk == null) return;
+    if (fk == null) return false;
    KeyValue firstKV = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
    byte [] lk = r.getLastKey();
    KeyValue lastKV = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
@@ -1525,7 +1511,7 @@
    if (this.comparator.compareRows(lastKV, firstOnRow) < 0) {
      // If last key in file is not of the target table, no candidates in this
      // file.  Return.
-      if (!state.isTargetTable(lastKV)) return;
+      if (!state.isTargetTable(lastKV)) return false;
      // If the row we're looking for is past the end of file, set search key to
      // last key. TODO: Cache last and first key rather than make each time.
      firstOnRow = new KeyValue(lastKV.getRow(), HConstants.LATEST_TIMESTAMP);
@@ -1533,10 +1519,10 @@
    // Get a scanner that caches blocks and that uses pread.
    HFileScanner scanner = r.getScanner(true, true, false);
    // Seek scanner.  If can't seek it, return.
-    if (!seekToScanner(scanner, firstOnRow, firstKV)) return;
+    if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
    // If we found candidate on firstOnRow, just return. THIS WILL NEVER HAPPEN!
    // Unlikely that there'll be an instance of actual first row in table.
-    if (walkForwardInSingleRow(scanner, firstOnRow, state)) return;
+    if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
    // If here, need to start backing up.
    while (scanner.seekBefore(firstOnRow.getBuffer(), firstOnRow.getKeyOffset(),
       firstOnRow.getKeyLength())) {
@@ -1546,10 +1532,11 @@
      // Make new first on row.
      firstOnRow = new KeyValue(kv.getRow(), HConstants.LATEST_TIMESTAMP);
      // Seek scanner.  If can't seek it, break.
-      if (!seekToScanner(scanner, firstOnRow, firstKV)) break;
+      if (!seekToScanner(scanner, firstOnRow, firstKV)) return false;
      // If we find something, break;
-      if (walkForwardInSingleRow(scanner, firstOnRow, state)) break;
+      if (walkForwardInSingleRow(scanner, firstOnRow, state)) return true;
    }
+    return false;
  }

  /*
@@ -1608,17 +1595,12 @@
  public boolean canSplit() {
    this.lock.readLock().lock();
    try {
-      // Not splitable if we find a reference store file present in the store.
-      for (StoreFile sf : storefiles) {
-        if (sf.isReference()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(sf + " is not splittable");
-          }
-          return false;
-        }
+      // Not splittable if we find a reference store file present in the store.
+      boolean result = !hasReferences();
+      if (!result && LOG.isDebugEnabled()) {
+        LOG.debug("Cannot split region due to reference files being there");
      }
-
-      return true;
+      return result;
    } finally {
      this.lock.readLock().unlock();
    }
@@ -1628,64 +1610,14 @@
  public byte[] getSplitPoint() {
    this.lock.readLock().lock();
    try {
-      // sanity checks
-      if (this.storefiles.isEmpty()) {
-        return null;
-      }
      // Should already be enforced by the split policy!
      assert !this.region.getRegionInfo().isMetaRegion();
-
-      // Not splitable if we find a reference store file present in the store.
-      long maxSize = 0L;
-      StoreFile largestSf = null;
-      for (StoreFile sf : storefiles) {
-        if (sf.isReference()) {
-          // Should already be enforced since we return false in this case
-          assert false : "getSplitPoint() called on a region that can't split!";
-          return null;
-        }
-
-        StoreFile.Reader r = sf.getReader();
-        if (r == null) {
-          LOG.warn("Storefile " + sf + " Reader is null");
-          continue;
-        }
-
-        long size = r.length();
-        if (size > maxSize) {
-          // This is the largest one so far
-          maxSize = size;
-          largestSf = sf;
-        }
-      }
-
-      StoreFile.Reader r = largestSf.getReader();
-      if (r == null) {
-        LOG.warn("Storefile " + largestSf + " Reader is null");
+      // Not splittable if we find a reference store file present in the store.
+      if (hasReferences()) {
+        assert false : "getSplitPoint() called on a region that can't split!";
        return null;
      }
-      // Get first, last, and mid keys.  Midkey is the key that starts block
-      // in middle of hfile.  Has column and timestamp.  Need to return just
-      // the row we want to split on as midkey.
-      byte [] midkey = r.midkey();
-      if (midkey != null) {
-        KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length);
-        byte [] fk = r.getFirstKey();
-        KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
-        byte [] lk = r.getLastKey();
-        KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
-        // if the midkey is the same as the first or last keys, then we cannot
-        // (ever) split this region.
-        if (this.comparator.compareRows(mk, firstKey) == 0 ||
-            this.comparator.compareRows(mk, lastKey) == 0) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("cannot split because midkey is the same as first or " +
-              "last row");
-          }
-          return null;
-        }
-        return mk.getRow();
-      }
+      return this.storeFileManager.getSplitPoint();
    } catch(IOException e) {
      LOG.warn("Failed getting store size for " + this, e);
    } finally {
@@ -1741,7 +1673,7 @@

  @Override
  public int getStorefilesCount() {
-    return this.storefiles.size();
+    return this.storeFileManager.getStorefileCount();
  }

  @Override
@@ -1752,7 +1684,7 @@
  @Override
  public long getStorefilesSize() {
    long size = 0;
-    for (StoreFile s: storefiles) {
+    for (StoreFile s: this.storeFileManager.getStorefiles()) {
      StoreFile.Reader r = s.getReader();
      if (r == null) {
        LOG.warn("StoreFile " + s + " has a null Reader");
@@ -1766,7 +1698,7 @@
  @Override
  public long getStorefilesIndexSize() {
    long size = 0;
-    for (StoreFile s: storefiles) {
+    for (StoreFile s: this.storeFileManager.getStorefiles()) {
      StoreFile.Reader r = s.getReader();
      if (r == null) {
        LOG.warn("StoreFile " + s + " has a null Reader");
@@ -1780,7 +1712,7 @@
  @Override
  public long getTotalStaticIndexSize() {
    long size = 0;
-    for (StoreFile s : storefiles) {
+    for (StoreFile s : this.storeFileManager.getStorefiles()) {
      size += s.getReader().getUncompressedDataIndexSize();
    }
    return size;
@@ -1789,7 +1721,7 @@
  @Override
  public long getTotalStaticBloomSize() {
    long size = 0;
-    for (StoreFile s : storefiles) {
+    for (StoreFile s : this.storeFileManager.getStorefiles()) {
      StoreFile.Reader r = s.getReader();
      size += r.getTotalBloomSize();
    }
@@ -1811,7 +1743,7 @@
    if(priority == Store.PRIORITY_USER) {
      return Store.PRIORITY_USER;
    } else {
-      return this.blockingStoreFileCount - this.storefiles.size();
+      return this.blockingStoreFileCount - this.storeFileManager.getStorefileCount();
    }
  }

@@ -1923,7 +1855,8 @@

  @Override
  public boolean needsCompaction() {
-    return compactionPolicy.needsCompaction(storefiles.size() - filesCompacting.size());
+    return compactionPolicy.needsCompaction(
+        this.storeFileManager.getStorefileCount() - filesCompacting.size());
  }

  @Override
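A discipline worth noting from the HStore hunks above: the read lock is held only long enough to capture the file snapshot (getFilesForScanOrGet plus memstore scanners) and is released before the expensive scanner construction, per the HBASE-4485 comments. A condensed sketch of that locking pattern with simplified types; illustrative only, not patch code.

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  import com.google.common.collect.ImmutableList;

  // Snapshot under the read lock; do the slow work after releasing it.
  class SnapshotReader {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private volatile ImmutableList<String> files = ImmutableList.of();

    ImmutableList<String> filesToScan() {
      ImmutableList<String> snapshot;
      lock.readLock().lock();
      try {
        snapshot = files;         // cheap: just read the current immutable list
      } finally {
        lock.readLock().unlock(); // release before building scanners from the snapshot
      }
      return snapshot;            // immutable, so safe to use lock-free afterwards
    }
  }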
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 599a92c..7457404 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver;

 import java.io.IOException;
+import java.util.Collection;
 import java.util.List;
 import java.util.NavigableSet;

@@ -52,7 +53,7 @@ public interface Store extends HeapSize, StoreConfigInformation {
  // General Accessors
  public KeyValue.KVComparator getComparator();

-  public List<StoreFile> getStorefiles();
+  public Collection<StoreFile> getStorefiles();

  /**
   * Close all the readers We don't need to worry about subsequent requests because the HRegion
   * holds a write lock that will prevent any more reads or writes.
   * @return the {@link StoreFile StoreFiles} that were previously being used.
   * @throws IOException on failure
   */
-  public ImmutableList<StoreFile> close() throws IOException;
+  public Collection<StoreFile> close() throws IOException;

  /**
   * Return a scanner for both the memstore and the HStore files. Assumes we are not in a
   * compaction.
@@ -208,11 +209,6 @@
   */
  public HFileDataBlockEncoder getDataBlockEncoder();

-  /**
-   * @return the number of files in this store
-   */
-  public int getNumberOfStoreFiles();
-
  /** @return aggregate size of all HStores used in the last compaction */
  public long getLastCompactSize();

@@ -257,13 +253,6 @@
  // Test-helper methods

  /**
-   * Compact the most recent N files. Used in testing.
-   * @param N number of files to compact. Must be less than or equal to current number of files.
-   * @throws IOException on failure
-   */
-  public void compactRecentForTesting(int N) throws IOException;
-
-  /**
   * Used for tests.
   * @return cache configuration for this Store.
   */
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 3eb6d25..31b4c8e 100644
--- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -943,6 +943,38 @@ public class StoreFile {
  }

  /**
+   * Gets the approximate mid-point of this file that is optimal for use in splitting it.
+   * @param comparator Comparator used to compare KVs.
+   * @return The split point row, or null if splitting is not possible, or reader is null.
+   */
+  byte[] getFileSplitPoint(KVComparator comparator) throws IOException {
+    if (this.reader == null) {
+      LOG.warn("Storefile " + this + " Reader is null; cannot get split point");
+      return null;
+    }
+    // Get first, last, and mid keys.  Midkey is the key that starts block
+    // in middle of hfile.  Has column and timestamp.  Need to return just
+    // the row we want to split on as midkey.
+    byte [] midkey = this.reader.midkey();
+    if (midkey != null) {
+      KeyValue mk = KeyValue.createKeyValueFromKey(midkey, 0, midkey.length);
+      byte [] fk = this.reader.getFirstKey();
+      KeyValue firstKey = KeyValue.createKeyValueFromKey(fk, 0, fk.length);
+      byte [] lk = this.reader.getLastKey();
+      KeyValue lastKey = KeyValue.createKeyValueFromKey(lk, 0, lk.length);
+      // if the midkey is the same as the first or last keys, we cannot (ever) split this region.
+      if (comparator.compareRows(mk, firstKey) == 0 || comparator.compareRows(mk, lastKey) == 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("cannot split because midkey is the same as first or last row");
+        }
+        return null;
+      }
+      return mk.getRow();
+    }
+    return null;
+  }
+
+  /**
   * A StoreFile writer.  Use this to read/write HBase Store Files. It is package
   * local because it is an implementation detail of the HBase regionserver.
   */
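The decision getFileSplitPoint encapsulates is easy to see in miniature: the midkey is the row that starts the middle block, and a split is refused when that row equals the first or last row, since one daughter region would receive an empty key range. A simplified sketch over plain sorted row keys; illustrative only, the real method compares KeyValues via KVComparator.

  import java.util.List;

  class SplitPointSketch {
    // Mirrors the refusal logic of StoreFile.getFileSplitPoint over sorted rows.
    static String splitPoint(List<String> sortedRows) {
      if (sortedRows.isEmpty()) return null;
      String mid = sortedRows.get(sortedRows.size() / 2);
      // If the midpoint equals the first or last row, the region can never split.
      if (mid.equals(sortedRows.get(0))
          || mid.equals(sortedRows.get(sortedRows.size() - 1))) {
        return null;
      }
      return mid;
    }
  }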
diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
new file mode 100644
index 0000000..f5c958c
--- /dev/null
+++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
@@ -0,0 +1,123 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.KeyValue.KVComparator;
+
+import com.google.common.collect.ImmutableCollection;
+
+/**
+ * Manages the store files and basic metadata about them that determines the logical structure
+ * (e.g. what files to return for scan, how to determine split point, and such).
+ * Does NOT affect the physical structure of files in HDFS.
+ * Example alternative structures - the default list of files by seqNum; a levelDB-style one
+ * sorted by level and seqNum.
+ *
+ * Implementations are assumed to be not thread-safe.
+ */
+@InterfaceAudience.Private
+interface StoreFileManager {
+  /**
+   * Loads the initial store files into an empty StoreFileManager.
+   * @param storeFiles The files to load.
+   */
+  public abstract void loadFiles(List<StoreFile> storeFiles);
+
+  /**
+   * Adds a new file, from either a MemStore flush or a bulk insert, into the structure.
+   * @param sf New store file.
+   */
+  public abstract void insertNewFile(StoreFile sf);
+
+  /**
+   * Adds compaction results into the structure.
+   * @param compactedFiles The input files for the compaction.
+   * @param results The resulting files for the compaction.
+   */
+  public abstract void addCompactionResults(
+      Collection<StoreFile> compactedFiles, Collection<StoreFile> results);
+
+  /**
+   * Clears all the files currently in use and returns them.
+   * @return The files previously in use.
+   */
+  public abstract ImmutableCollection<StoreFile> clearFiles();
+
+  /**
+   * Gets the snapshot of the store files currently in use. Can be used for things like metrics
+   * and checks; should not assume anything about relations between store files in the list.
+   * @return The list of StoreFiles.
+   */
+  public abstract Collection<StoreFile> getStorefiles();
+
+  /**
+   * Returns the number of files currently in use.
+   * @return The number of files.
+   */
+  public abstract int getStorefileCount();
+
+  /**
+   * Gets the store files to scan for a Scan or Get request.
+   * @param isGet Whether it's a get.
+   * @param startRow Start row of the request.
+   * @param stopRow Stop row of the request.
+   * @return The list of files that are to be read for this request.
+   */
+  public abstract Collection<StoreFile> getFilesForScanOrGet(boolean isGet,
+      byte[] startRow, byte[] stopRow);
+
+  /**
+   * Gets the initial, full list of candidate store files to check for row-key-before.
+   * @param targetKey The key that is the basis of the search.
+   * @return The files that may have the key less than or equal to targetKey, in reverse
+   *         order of new-ness, and preference for target key.
+   */
+  public abstract Iterator<StoreFile> getCandidateFilesForRowKeyBefore(
+      KeyValue targetKey);
+
+  /**
+   * Updates the candidate list for finding row key before. Based on the list of candidates
+   * remaining to check from getCandidateFilesForRowKeyBefore, targetKey and current candidate,
+   * may trim and reorder the list to remove the files where a better candidate cannot be found.
+   * @param candidateFiles The candidate files not yet checked for better candidates - return
+   *          value from {@link #getCandidateFilesForRowKeyBefore(KeyValue)},
+   *          with some files already removed.
+   * @param targetKey The key to search for.
+   * @param candidate The current best candidate found.
+   * @return The list to replace candidateFiles.
+   */
+  public abstract Iterator<StoreFile> updateCandidateFilesForRowKeyBefore(
+      Iterator<StoreFile> candidateFiles, KeyValue targetKey, KeyValue candidate);
+
+
+  /**
+   * Gets the split point for the split of this set of store files (approx. middle).
+   * @return The mid-point, or null if no split is possible.
+   * @throws IOException
+   */
+  public abstract byte[] getSplitPoint() throws IOException;
+}
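To make the updateCandidateFilesForRowKeyBefore contract concrete: as the TODO in DefaultStoreFileManager hints, a level-aware implementation could drop files that can no longer contain a better candidate. A hypothetical sketch of such trimming, using a stand-in FileMeta type with an assumed last-row field; none of this is in the patch.

  import java.util.ArrayList;
  import java.util.Iterator;
  import java.util.List;

  class RowKeyBeforeTrimSketch {
    static class FileMeta {     // stand-in for per-StoreFile metadata
      final byte[] lastRow;     // assumed: last row key stored in the file
      FileMeta(byte[] lastRow) { this.lastRow = lastRow; }
    }

    // A file whose last row sorts before the current best candidate cannot
    // contain a larger row that is still <= the target, so drop it.
    static Iterator<FileMeta> trim(Iterator<FileMeta> files, byte[] candidateRow) {
      List<FileMeta> useful = new ArrayList<FileMeta>();
      while (files.hasNext()) {
        FileMeta f = files.next();
        if (compare(f.lastRow, candidateRow) >= 0) {
          useful.add(f);
        }
      }
      return useful.iterator();
    }

    // Lexicographic byte comparison, like org.apache.hadoop.hbase.util.Bytes.compareTo.
    private static int compare(byte[] a, byte[] b) {
      int n = Math.min(a.length, b.length);
      for (int i = 0; i < n; i++) {
        int d = (a[i] & 0xff) - (b[i] & 0xff);
        if (d != 0) return d;
      }
      return a.length - b.length;
    }
  }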
+ */ + static StoreFile getLargestFile(final Collection candidates) { + long maxSize = -1L; + StoreFile largestSf = null; + for (StoreFile sf : candidates) { + StoreFile.Reader r = sf.getReader(); + if (r == null) continue; + long size = r.length(); + if (size > maxSize) { + maxSize = size; + largestSf = sf; + } + } + return largestSf; + } } diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java index 63e5a59..b4b60a7 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionPolicy.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; +import java.util.Collection; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; @@ -66,7 +67,7 @@ public abstract class CompactionPolicy extends Configured { * @return True if we should run a major compaction. */ public abstract boolean isMajorCompaction( - final List filesToCompact) throws IOException; + final Collection filesToCompact) throws IOException; /** * @param compactionSize Total size of some compaction diff --git hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactionPolicy.java hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactionPolicy.java index ddac105..e1daf2a 100644 --- hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactionPolicy.java +++ hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DefaultCompactionPolicy.java @@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver.compactions; import java.io.IOException; import java.util.ArrayList; import java.util.Calendar; +import java.util.Collection; import java.util.GregorianCalendar; import java.util.List; import java.util.Random; @@ -293,7 +294,7 @@ public class DefaultCompactionPolicy extends CompactionPolicy { * @param filesToCompact Files to compact. Can be null. * @return True if we should run a major compaction. */ - public boolean isMajorCompaction(final List filesToCompact) + public boolean isMajorCompaction(final Collection filesToCompact) throws IOException { boolean result = false; long mcTime = getNextMajorCompactTime(filesToCompact); @@ -308,7 +309,7 @@ public class DefaultCompactionPolicy extends CompactionPolicy { long cfTtl = this.store.getStoreFileTtl(); if (filesToCompact.size() == 1) { // Single file - StoreFile sf = filesToCompact.get(0); + StoreFile sf = filesToCompact.iterator().next(); Long minTimestamp = sf.getMinimumTimestamp(); long oldest = (minTimestamp == null) ? 
Long.MIN_VALUE @@ -337,7 +338,7 @@ public class DefaultCompactionPolicy extends CompactionPolicy { return result; } - public long getNextMajorCompactTime(final List filesToCompact) { + public long getNextMajorCompactTime(final Collection filesToCompact) { // default = 24hrs long ret = comConf.getMajorCompactionPeriod(); if (ret > 0) { diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java index b4fa63a..a7efa22 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java @@ -4807,11 +4807,11 @@ public class TestFromClientSide { assertEquals(expectedBlockMiss, cache.getStats().getMissCount()); // compact, net minus two blocks, two hits, no misses System.out.println("Compacting"); - assertEquals(2, store.getNumberOfStoreFiles()); + assertEquals(2, store.getStorefilesCount()); store.triggerMajorCompaction(); region.compactStores(); waitForStoreFileCount(store, 1, 10000); // wait 10 seconds max - assertEquals(1, store.getNumberOfStoreFiles()); + assertEquals(1, store.getStorefilesCount()); expectedBlockCount -= 2; // evicted two blocks, cached none assertEquals(expectedBlockCount, cache.getBlockCount()); expectedBlockHits += 2; @@ -4832,12 +4832,12 @@ public class TestFromClientSide { throws InterruptedException { long start = System.currentTimeMillis(); while (start + timeout > System.currentTimeMillis() && - store.getNumberOfStoreFiles() != count) { + store.getStorefilesCount() != count) { Thread.sleep(100); } System.out.println("start=" + start + ", now=" + - System.currentTimeMillis() + ", cur=" + store.getNumberOfStoreFiles()); - assertEquals(count, store.getNumberOfStoreFiles()); + System.currentTimeMillis() + ", cur=" + store.getStorefilesCount()); + assertEquals(count, store.getStorefilesCount()); } @Test diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index b501eb5..0d3c062 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.hfile.BlockType.BlockCategory; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hadoop.hbase.regionserver.HStore; import org.apache.hadoop.hbase.regionserver.InternalScanner; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -151,7 +152,8 @@ public class TestScannerSelectionUsingTTL { // Exercise both compaction codepaths. 
if (explicitCompaction) { - region.getStore(FAMILY_BYTES).compactRecentForTesting(totalNumFiles); + HStore store = (HStore)region.getStore(FAMILY_BYTES); + store.compactRecentForTestingAssumingDefaultPolicy(totalNumFiles); } else { region.compactStores(); } diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index 7b27cf5..8b52fe6 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.IOException; +import java.util.Collection; import java.util.TreeMap; import java.util.List; @@ -195,7 +196,7 @@ public class TestLoadIncrementalHFiles { loader.doBulkLoad(dir, table); // Get the store files - List files = util.getHBaseCluster(). + Collection files = util.getHBaseCluster(). getRegions(TABLE).get(0).getStore(FAMILY).getStorefiles(); for (StoreFile file: files) { // the sequenceId gets initialized during createReader diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java index b682f89..6ba98d4 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompaction.java @@ -23,6 +23,7 @@ import static org.mockito.Mockito.spy; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -314,7 +315,7 @@ public class TestCompaction extends HBaseTestCase { // ensure that major compaction time is deterministic DefaultCompactionPolicy c = (DefaultCompactionPolicy)s.compactionPolicy; - List storeFiles = s.getStorefiles(); + Collection storeFiles = s.getStorefiles(); long mcTime = c.getNextMajorCompactTime(storeFiles); for (int i = 0; i < 10; ++i) { assertEquals(mcTime, c.getNextMajorCompactTime(storeFiles)); @@ -439,7 +440,7 @@ public class TestCompaction extends HBaseTestCase { Store store2 = this.r.stores.get(fam2); int numFiles1 = store2.getStorefiles().size(); assertTrue("Was expecting to see 4 store files", numFiles1 > compactionThreshold); // > 3 - store2.compactRecentForTesting(compactionThreshold); // = 3 + ((HStore)store2).compactRecentForTestingAssumingDefaultPolicy(compactionThreshold); // = 3 int numFiles2 = store2.getStorefiles().size(); // Check that we did compact assertTrue("Number of store files should go down", numFiles1 > numFiles2); @@ -587,7 +588,7 @@ public class TestCompaction extends HBaseTestCase { } HStore store = (HStore) r.getStore(COLUMN_FAMILY); - List storeFiles = store.getStorefiles(); + Collection storeFiles = store.getStorefiles(); Compactor tool = store.compactionPolicy.getCompactor(); List newFiles = diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 49a72c4..5639c6c 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -22,6 +22,7 @@ import java.io.IOException; import 
java.io.InterruptedIOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -3256,7 +3257,7 @@ public class TestHRegion extends HBaseTestCase { } //before compaction HStore store = (HStore) region.getStore(fam1); - List storeFiles = store.getStorefiles(); + Collection storeFiles = store.getStorefiles(); for (StoreFile storefile : storeFiles) { StoreFile.Reader reader = storefile.getReader(); reader.loadFileInfo(); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java index 54ca823..602e38a 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java @@ -163,16 +163,9 @@ public class TestSplitTransaction { * Pass a reference store */ @Test public void testPrepareWithRegionsWithReference() throws IOException { - // create a mock that will act as a reference StoreFile - StoreFile storeFileMock = Mockito.mock(StoreFile.class); - when(storeFileMock.isReference()).thenReturn(true); - - // add the mock to the parent stores HStore storeMock = Mockito.mock(HStore.class); - List storeFileList = new ArrayList(1); - storeFileList.add(storeFileMock); - when(storeMock.getStorefiles()).thenReturn(storeFileList); - when(storeMock.close()).thenReturn(ImmutableList.copyOf(storeFileList)); + when(storeMock.hasReferences()).thenReturn(true); + when(storeMock.close()).thenReturn(ImmutableList.of()); this.parent.stores.put(Bytes.toBytes(""), storeMock); SplitTransaction st = new SplitTransaction(this.parent, GOOD_SPLIT_ROW); diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java index bce0bc0..0e5e96f 100644 --- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java +++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStore.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.lang.ref.SoftReference; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -269,14 +270,16 @@ public class TestStore extends TestCase { } private static long getLowestTimeStampFromFS(FileSystem fs, - final List candidates) throws IOException { + final Collection candidates) throws IOException { long minTs = Long.MAX_VALUE; if (candidates.isEmpty()) { return minTs; } Path[] p = new Path[candidates.size()]; - for (int i = 0; i < candidates.size(); ++i) { - p[i] = candidates.get(i).getPath(); + int i = 0; + for (StoreFile sf : candidates) { + p[i] = sf.getPath(); + ++i; } FileStatus[] stats = fs.listStatus(p); @@ -305,7 +308,7 @@ public class TestStore extends TestCase { flush(1); // Now put in place an empty store file. Its a little tricky. Have to // do manually with hacked in sequence id. - StoreFile f = this.store.getStorefiles().get(0); + StoreFile f = this.store.getStorefiles().iterator().next(); Path storedir = f.getPath().getParent(); long seqid = f.getMaxSequenceId(); Configuration c = HBaseConfiguration.create();
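Taken together, the new seam can be exercised roughly as below; a test-style sketch of the manager lifecycle. The StoreFile variables are placeholders, and DefaultStoreFileManager is package-private, so code like this would have to live in org.apache.hadoop.hbase.regionserver.

  // Illustrative lifecycle of the abstraction this patch introduces.
  // Placeholders: filesFoundOnOpen (List<StoreFile>), fileFromMemstoreFlush,
  // inputFile1, inputFile2, compactionOutput, expectedCount.
  StoreFileManager mgr = new DefaultStoreFileManager(KeyValue.COMPARATOR);
  mgr.loadFiles(filesFoundOnOpen);                  // files discovered on store open
  mgr.insertNewFile(fileFromMemstoreFlush);         // a flush adds a single file
  mgr.addCompactionResults(
      Arrays.asList(inputFile1, inputFile2),        // compaction inputs...
      Collections.singletonList(compactionOutput)); // ...replaced by the output
  assert mgr.getStorefileCount() == expectedCount;
  Collection<StoreFile> closed = mgr.clearFiles();  // on close: hand back and reset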