diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java index 7589db3..8373312 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java @@ -19,17 +19,6 @@ */ package org.apache.hadoop.hbase.regionserver; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; -import org.apache.hadoop.hbase.RemoteExceptionHandler; -import org.apache.hadoop.hbase.client.HTable; -import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.util.Writables; -import org.apache.hadoop.util.StringUtils; - import java.io.IOException; import java.util.HashSet; import java.util.concurrent.BlockingQueue; @@ -37,14 +26,18 @@ import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.RemoteExceptionHandler; +import org.apache.hadoop.util.StringUtils; + /** * Compact region on request and then run split if appropriate */ class CompactSplitThread extends Thread { static final Log LOG = LogFactory.getLog(CompactSplitThread.class); - private HTable root = null; - private HTable meta = null; private final long frequency; private final ReentrantLock lock = new ReentrantLock(); @@ -68,7 +61,6 @@ class CompactSplitThread extends Thread { @Override public void run() { - int count = 0; while (!this.server.isStopRequested()) { HRegion r = null; try { @@ -144,78 +136,39 @@ class CompactSplitThread extends Thread { } } - private void split(final HRegion region, final byte [] 
midKey) + private void split(final HRegion parent, final byte [] midKey) throws IOException { - final HRegionInfo oldRegionInfo = region.getRegionInfo(); final long startTime = System.currentTimeMillis(); - final HRegion[] newRegions = region.splitRegion(midKey); - if (newRegions == null) { - // Didn't need to be split - return; - } - - // When a region is split, the META table needs to updated if we're - // splitting a 'normal' region, and the ROOT table needs to be - // updated if we are splitting a META region. - HTable t = null; - if (region.getRegionInfo().isMetaTable()) { - // We need to update the root region - if (this.root == null) { - this.root = new HTable(conf, HConstants.ROOT_TABLE_NAME); - } - t = root; - } else { - // For normal regions we need to update the meta region - if (meta == null) { - meta = new HTable(conf, HConstants.META_TABLE_NAME); + SplitTransaction st = new SplitTransaction(parent, midKey); + // If prepare does not return true, for some reason -- logged inside in + // the prepare call -- we are not ready to split just now. Just return. + if (!st.prepare()) return; + try { + st.execute(this.server); + } catch (IOException ioe) { + try { + LOG.info("Running rollback of failed split of " + + parent.getRegionNameAsString() + "; " + ioe.getMessage()); + st.rollback(this.server); + LOG.info("Successful rollback of failed split of " + + parent.getRegionNameAsString()); + return; + } catch (RuntimeException e) { + // If failed rollback, kill this server to avoid having a hole in table. + LOG.info("Failed rollback of failed split of " + + parent.getRegionNameAsString() + " -- aborting server", e); + this.server.abort("Failed split"); } - t = meta; - } - - // Mark old region as offline and split in META. - // NOTE: there is no need for retry logic here. HTable does it for us. - oldRegionInfo.setOffline(true); - oldRegionInfo.setSplit(true); - // Inform the HRegionServer that the parent HRegion is no-longer online. 
- this.server.removeFromOnlineRegions(oldRegionInfo); - - Put put = new Put(oldRegionInfo.getRegionName()); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - Writables.getBytes(oldRegionInfo)); - put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, - HConstants.EMPTY_BYTE_ARRAY); - put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, - HConstants.EMPTY_BYTE_ARRAY); - put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, - Writables.getBytes(newRegions[0].getRegionInfo())); - put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, - Writables.getBytes(newRegions[1].getRegionInfo())); - t.put(put); - - // If we crash here, then the daughters will not be added and we'll have - // and offlined parent but no daughters to take up the slack. hbase-2244 - // adds fixup to the metascanners. - - // Add new regions to META - for (int i = 0; i < newRegions.length; i++) { - put = new Put(newRegions[i].getRegionName()); - put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, - Writables.getBytes(newRegions[i].getRegionInfo())); - t.put(put); } - // If we crash here, the master will not know of the new daughters and they - // will not be assigned. The metascanner when it runs will notice and take - // care of assigning the new daughters. - - // Now tell the master about the new regions - server.reportSplit(oldRegionInfo, newRegions[0].getRegionInfo(), - newRegions[1].getRegionInfo()); - - LOG.info("region split, META updated, and report to master all" + - " successful. Old region=" + oldRegionInfo.toString() + - ", new regions: " + newRegions[0].toString() + ", " + - newRegions[1].toString() + ". Split took " + + // Now tell the master about the new regions. If we fail here, its OK. + // Basescanner will do fix up. And reporting split to master is going away. + // TODO: Verify this still holds in new master rewrite. 
+ this.server.reportSplit(parent.getRegionInfo(), st.getFirstDaughter(), + st.getSecondDaughter()); + LOG.info("Region split, META updated, and report to master. Parent=" + + parent.getRegionInfo() + ", new regions: " + + st.getFirstDaughter() + ", " + st.getSecondDaughter() + ". Split took " + StringUtils.formatTimeDiff(System.currentTimeMillis(), startTime)); } diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 6dc41a4..4bbf8ef 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -67,7 +67,6 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.IncompatibleFilterException; import org.apache.hadoop.hbase.io.HeapSize; -import org.apache.hadoop.hbase.io.Reference.Range; import org.apache.hadoop.hbase.io.hfile.BlockCache; import org.apache.hadoop.hbase.ipc.HRegionInterface; import org.apache.hadoop.hbase.regionserver.wal.HLog; @@ -123,7 +122,6 @@ import com.google.common.collect.Lists; */ public class HRegion implements HeapSize { // , Writable{ public static final Log LOG = LogFactory.getLog(HRegion.class); - static final String SPLITDIR = "splits"; static final String MERGEDIR = "merges"; final AtomicBoolean closed = new AtomicBoolean(false); @@ -218,7 +216,7 @@ public class HRegion implements HeapSize { // , Writable{ private final long blockingMemStoreSize; final long threadWakeFrequency; // Used to guard splits and closes - private final ReentrantReadWriteLock splitsAndClosesLock = + final ReentrantReadWriteLock splitsAndClosesLock = new ReentrantReadWriteLock(); private final ReentrantReadWriteLock newScannerLock = new ReentrantReadWriteLock(); @@ -226,7 +224,6 @@ public class HRegion implements HeapSize { // , Writable{ // Stop updates lock private final ReentrantReadWriteLock updatesLock = 
new ReentrantReadWriteLock(); - private final Object splitLock = new Object(); private boolean splitRequest; private final ReadWriteConsistencyControl rwcc = @@ -291,7 +288,7 @@ public class HRegion implements HeapSize { // , Writable{ this.threadWakeFrequency = conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000); String encodedNameStr = this.regionInfo.getEncodedName(); - this.regiondir = new Path(tableDir, encodedNameStr); + this.regiondir = getRegionDir(this.tableDir, encodedNameStr); if (LOG.isDebugEnabled()) { // Write out region name as string and its encoded name. LOG.debug("Creating region " + this); @@ -346,7 +343,7 @@ public class HRegion implements HeapSize { // , Writable{ // Get rid of any splits or merges that were lost in-progress. Clean out // these directories here on open. We may be opening a region that was // being split but we crashed in the middle of it all. - FSUtils.deleteDirectory(this.fs, new Path(regiondir, SPLITDIR)); + SplitTransaction.cleanupAnySplitDetritus(this); FSUtils.deleteDirectory(this.fs, new Path(regiondir, MERGEDIR)); // See if region is meant to run read-only. 
@@ -369,7 +366,7 @@ public class HRegion implements HeapSize { // , Writable{ * @param initialFiles * @throws IOException */ - private static void moveInitialFilesIntoPlace(final FileSystem fs, + static void moveInitialFilesIntoPlace(final FileSystem fs, final Path initialFiles, final Path regiondir) throws IOException { if (initialFiles != null && fs.exists(initialFiles)) { @@ -468,70 +465,68 @@ public class HRegion implements HeapSize { // , Writable{ * * @throws IOException e */ - public List close(final boolean abort) throws IOException { + public synchronized List close(final boolean abort) + throws IOException { if (isClosed()) { - LOG.warn("region " + this + " already closed"); + LOG.warn("Region " + this + " already closed"); return null; } - synchronized (splitLock) { - boolean wasFlushing = false; - synchronized (writestate) { - // Disable compacting and flushing by background threads for this - // region. - writestate.writesEnabled = false; - wasFlushing = writestate.flushing; - LOG.debug("Closing " + this + ": disabling compactions & flushes"); - while (writestate.compacting || writestate.flushing) { - LOG.debug("waiting for" + - (writestate.compacting ? " compaction" : "") + - (writestate.flushing ? - (writestate.compacting ? "," : "") + " cache flush" : - "") + " to complete for region " + this); - try { - writestate.wait(); - } catch (InterruptedException iex) { - // continue - } + boolean wasFlushing = false; + synchronized (writestate) { + // Disable compacting and flushing by background threads for this + // region. + writestate.writesEnabled = false; + wasFlushing = writestate.flushing; + LOG.debug("Closing " + this + ": disabling compactions & flushes"); + while (writestate.compacting || writestate.flushing) { + LOG.debug("waiting for" + + (writestate.compacting ? " compaction" : "") + + (writestate.flushing ? + (writestate.compacting ? 
"," : "") + " cache flush" : + "") + " to complete for region " + this); + try { + writestate.wait(); + } catch (InterruptedException iex) { + // continue } } - // If we were not just flushing, is it worth doing a preflush...one - // that will clear out of the bulk of the memstore before we put up - // the close flag? - if (!abort && !wasFlushing && worthPreFlushing()) { - LOG.info("Running close preflush of " + this.getRegionNameAsString()); - internalFlushcache(); - } - newScannerLock.writeLock().lock(); - this.closing.set(true); + } + // If we were not just flushing, is it worth doing a preflush...one + // that will clear out of the bulk of the memstore before we put up + // the close flag? + if (!abort && !wasFlushing && worthPreFlushing()) { + LOG.info("Running close preflush of " + this.getRegionNameAsString()); + internalFlushcache(); + } + newScannerLock.writeLock().lock(); + this.closing.set(true); + try { + splitsAndClosesLock.writeLock().lock(); + LOG.debug("Updates disabled for region, no outstanding scanners on " + this); try { - splitsAndClosesLock.writeLock().lock(); - LOG.debug("Updates disabled for region, no outstanding scanners on " + - this); - try { - // Write lock means no more row locks can be given out. Wait on - // outstanding row locks to come in before we close so we do not drop - // outstanding updates. - waitOnRowLocks(); - LOG.debug("No more row locks outstanding on region " + this); - - // Don't flush the cache if we are aborting - if (!abort) { - internalFlushcache(); - } + // Write lock means no more row locks can be given out. Wait on + // outstanding row locks to come in before we close so we do not drop + // outstanding updates. 
+ waitOnRowLocks(); + LOG.debug("No more row locks outstanding on region " + this); + + // Don't flush the cache if we are aborting + if (!abort) { + internalFlushcache(); + } - List result = new ArrayList(); - for (Store store: stores.values()) { - result.addAll(store.close()); - } - this.closed.set(true); - LOG.info("Closed " + this); - return result; - } finally { - splitsAndClosesLock.writeLock().unlock(); + List result = new ArrayList(); + for (Store store: stores.values()) { + result.addAll(store.close()); } + this.closed.set(true); + LOG.info("Closed " + this); + return result; } finally { - newScannerLock.writeLock().unlock(); + splitsAndClosesLock.writeLock().unlock(); } + } finally { + newScannerLock.writeLock().unlock(); } } @@ -592,6 +587,17 @@ public class HRegion implements HeapSize { // , Writable{ return this.regiondir; } + /** + * Computes the Path of the HRegion + * + * @param tabledir qualified path for table + * @param name ENCODED region name + * @return Path of HRegion directory + */ + public static Path getRegionDir(final Path tabledir, final String name) { + return new Path(tabledir, name); + } + /** @return FileSystem being used by this region */ public FileSystem getFilesystem() { return this.fs; @@ -622,113 +628,6 @@ public class HRegion implements HeapSize { // , Writable{ } /* - * Split the HRegion to create two brand-new ones. This also closes - * current HRegion. Split should be fast since we don't rewrite store files - * but instead create new 'reference' store files that read off the top and - * bottom ranges of parent store files. - * @param splitRow row on which to split region - * @return two brand-new HRegions or null if a split is not needed - * @throws IOException - */ - HRegion [] splitRegion(final byte [] splitRow) throws IOException { - prepareToSplit(); - synchronized (splitLock) { - if (closed.get()) { - return null; - } - // Add start/end key checking: hbase-428. 
- byte [] startKey = this.regionInfo.getStartKey(); - byte [] endKey = this.regionInfo.getEndKey(); - if (this.comparator.matchingRows(startKey, 0, startKey.length, - splitRow, 0, splitRow.length)) { - LOG.debug("Startkey and midkey are same, not splitting"); - return null; - } - if (this.comparator.matchingRows(splitRow, 0, splitRow.length, - endKey, 0, endKey.length)) { - LOG.debug("Endkey and midkey are same, not splitting"); - return null; - } - LOG.info("Starting split of region " + this); - Path splits = new Path(this.regiondir, SPLITDIR); - if(!this.fs.exists(splits)) { - this.fs.mkdirs(splits); - } - // Calculate regionid to use. Can't be less than that of parent else - // it'll insert into wrong location over in .META. table: HBASE-710. - long rid = EnvironmentEdgeManager.currentTimeMillis(); - if (rid < this.regionInfo.getRegionId()) { - LOG.warn("Clock skew; parent regions id is " + - this.regionInfo.getRegionId() + " but current time here is " + rid); - rid = this.regionInfo.getRegionId() + 1; - } - HRegionInfo regionAInfo = new HRegionInfo(this.regionInfo.getTableDesc(), - startKey, splitRow, false, rid); - Path dirA = getSplitDirForDaughter(splits, regionAInfo); - HRegionInfo regionBInfo = new HRegionInfo(this.regionInfo.getTableDesc(), - splitRow, endKey, false, rid); - Path dirB = getSplitDirForDaughter(splits, regionBInfo); - - // Now close the HRegion. Close returns all store files or null if not - // supposed to close (? What to do in this case? Implement abort of close?) - // Close also does wait on outstanding rows and calls a flush just-in-case. - List hstoreFilesToSplit = close(false); - if (hstoreFilesToSplit == null) { - LOG.warn("Close came back null (Implement abort of close?)"); - throw new RuntimeException("close returned empty vector of HStoreFiles"); - } - - // Split each store file. 
- for(StoreFile h: hstoreFilesToSplit) { - StoreFile.split(fs, - Store.getStoreHomedir(splits, regionAInfo.getEncodedName(), - h.getFamily()), - h, splitRow, Range.bottom); - StoreFile.split(fs, - Store.getStoreHomedir(splits, regionBInfo.getEncodedName(), - h.getFamily()), - h, splitRow, Range.top); - } - - // Create a region instance and then move the splits into place under - // regionA and regionB. - HRegion regionA = - HRegion.newHRegion(tableDir, log, fs, conf, regionAInfo, null); - moveInitialFilesIntoPlace(this.fs, dirA, regionA.getRegionDir()); - HRegion regionB = - HRegion.newHRegion(tableDir, log, fs, conf, regionBInfo, null); - moveInitialFilesIntoPlace(this.fs, dirB, regionB.getRegionDir()); - - return new HRegion [] {regionA, regionB}; - } - } - - /* - * Get the daughter directories in the splits dir. The splits dir is under - * the parent regions' directory. - * @param splits - * @param hri - * @return Path to split dir. - * @throws IOException - */ - private Path getSplitDirForDaughter(final Path splits, final HRegionInfo hri) - throws IOException { - Path d = - new Path(splits, hri.getEncodedName()); - if (fs.exists(d)) { - // This should never happen; the splits dir will be newly made when we - // come in here. Even if we crashed midway through a split, the reopen - // of the parent region clears out the dir in its initialize method. - throw new IOException("Cannot split; target file collision at " + d); - } - return d; - } - - protected void prepareToSplit() { - // nothing - } - - /* * Do preparation for pending compaction. 
* @throws IOException */ @@ -2689,17 +2588,6 @@ public class HRegion implements HeapSize { // , Writable{ /** * Computes the Path of the HRegion * - * @param tabledir qualified path for table - * @param name ENCODED region name - * @return Path of HRegion directory - */ - public static Path getRegionDir(final Path tabledir, final String name) { - return new Path(tabledir, name); - } - - /** - * Computes the Path of the HRegion - * * @param rootdir qualified path of HBase root directory * @param info HRegionInfo for the region * @return qualified path of region directory @@ -3268,4 +3156,4 @@ public class HRegion implements HeapSize { // , Writable{ if (bc != null) bc.shutdown(); } } -} +} \ No newline at end of file diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java index 6a54736..f56e8b9 100644 --- a/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java @@ -122,7 +122,7 @@ import org.apache.zookeeper.Watcher.Event.KeeperState; * the HMaster. There are many HRegionServers in a single HBase deployment. 
*/ public class HRegionServer implements HRegionInterface, - HBaseRPCErrorHandler, Runnable, Watcher, Stoppable { + HBaseRPCErrorHandler, Runnable, Watcher, Stoppable, OnlineRegions { public static final Log LOG = LogFactory.getLog(HRegionServer.class); private static final HMsg REPORT_EXITING = new HMsg(Type.MSG_REPORT_EXITING); private static final HMsg REPORT_QUIESCED = new HMsg(Type.MSG_REPORT_QUIESCED); @@ -1464,12 +1464,7 @@ public class HRegionServer implements HRegionInterface, } return; } - this.lock.writeLock().lock(); - try { - this.onlineRegions.put(mapKey, region); - } finally { - this.lock.writeLock().unlock(); - } + addToOnlineRegions(region); } try { HMsg hmsg = new HMsg(HMsg.Type.MSG_REPORT_OPEN, regionInfo); @@ -2146,13 +2141,16 @@ public class HRegionServer implements HRegionInterface, return result; } - /** - * This method removes HRegion corresponding to hri from the Map of onlineRegions. - * - * @param hri the HRegionInfo corresponding to the HRegion to-be-removed. - * @return the removed HRegion, or null if the HRegion was not in onlineRegions. - */ - HRegion removeFromOnlineRegions(HRegionInfo hri) { + public void addToOnlineRegions(final HRegion r) { + this.lock.writeLock().lock(); + try { + this.onlineRegions.put(Bytes.mapKey(r.getRegionInfo().getRegionName()), r); + } finally { + this.lock.writeLock().unlock(); + } + } + + public HRegion removeFromOnlineRegions(HRegionInfo hri) { this.lock.writeLock().lock(); HRegion toReturn = null; try { diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java b/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java new file mode 100644 index 0000000..62f6eab --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/OnlineRegions.java @@ -0,0 +1,41 @@ +/** + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import org.apache.hadoop.hbase.HRegionInfo; + +/** + * Add and remove online regions. + */ +interface OnlineRegions { + /** + * Add to online regions. + * @param r + */ + void addToOnlineRegions(final HRegion r); + + /** + * This method removes HRegion corresponding to hri from the Map of onlineRegions. + * + * @param hri the HRegionInfo corresponding to the HRegion to-be-removed. + * @return the removed HRegion, or null if the HRegion was not in onlineRegions. + */ + HRegion removeFromOnlineRegions(HRegionInfo hri); +} diff --git a/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java new file mode 100644 index 0000000..0769a8e --- /dev/null +++ b/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -0,0 +1,544 @@ +/** + * Copyright 2010 The Apache Software Foundation + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.regionserver; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.ListIterator; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.KeyValue.KVComparator; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.io.Reference.Range; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.FSUtils; +import org.apache.hadoop.hbase.util.PairOfSameType; +import org.apache.hadoop.hbase.util.Writables; + +/** + * Executes region split as a "transaction". Call {@link #prepare()} to setup + * the transaction, {@link #execute(OnlineRegions)} to run the transaction and + * {@link #rollback(OnlineRegions)} to cleanup if execute fails. + * + *

Here is an example of how you would use this class: + *

+ *  SplitTransaction st = new SplitTransaction(parent, midKey);
+ *  if (!st.prepare()) return;
+ *  try {
+ *    st.execute(myOnlineRegions);
+ *  } catch (IOException ioe) {
+ *    try {
+ *      st.rollback(myOnlineRegions);
+ *      return;
+ *    } catch (RuntimeException e) {
+ *      myAbortable.abort("Failed split, abort");
+ *    }
+ *  }
+ * 
+ */ +class SplitTransaction { + private static final Log LOG = LogFactory.getLog(SplitTransaction.class); + private static final String SPLITDIR = "splits"; + + /* + * Region to split + */ + private final HRegion parent; + private HRegionInfo hri_a; + private HRegionInfo hri_b; + private Path splitdir; + + /* + * Row to split around + */ + private final byte [] splitrow; + + /** + * Types to add to the transaction journal + */ + enum JournalEntry { + /** + * We took out the region split/close lock. + */ + SPLIT_AND_CLOSE_LOCK, + /** + * We created the temporary split data directory. + */ + CREATE_SPLIT_DIR, + /** + * Closed the parent region. + */ + CLOSED_PARENT_REGION, + /** + * The parent has been taken out of the server's online regions list. + */ + OFFLINED_PARENT, + /** + * Started in on creation of the first daughter region. + */ + STARTED_REGION_A_CREATION, + /** + * Started in on the creation of the second daughter region. + */ + STARTED_REGION_B_CREATION + } + + /* + * Journal of how far the split transaction has progressed. + */ + private final List journal = new ArrayList(); + + /** + * Constructor + * @param c Configuration to use running split + * @param r Region to split + * @param splitrow Row to split around + */ + SplitTransaction(final HRegion r, final byte [] splitrow) { + this.parent = r; + this.splitrow = splitrow; + this.splitdir = getSplitDir(this.parent); + } + + /** + * Does checks on split inputs. + * @return true if the region is splitable else + * false if it is + * not (e.g. its already closed, etc.). 
If we return + * true, we'll have taken out the parent's + * splitsAndClosesLock and only way to unlock is successful + * {@link #execute(OnlineRegions)} or {@link #rollback(OnlineRegions)} + */ + public boolean prepare() { + boolean prepared = false; + this.parent.splitsAndClosesLock.writeLock().lock(); + try { + if (this.parent.isClosed() || this.parent.isClosing()) return prepared; + HRegionInfo hri = this.parent.getRegionInfo(); + // Check splitrow. + byte [] startKey = hri.getStartKey(); + byte [] endKey = hri.getEndKey(); + KVComparator comparator = this.parent.comparator; + if (comparator.matchingRows(startKey, 0, startKey.length, + this.splitrow, 0, this.splitrow.length)) { + LOG.info("Startkey and splitkey are same, not splitting: " + + Bytes.toString(this.splitrow)); + return prepared; + } + if (comparator.matchingRows(this.splitrow, 0, this.splitrow.length, + endKey, 0, endKey.length)) { + LOG.info("Endkey and splitkey are same, not splitting: " + + Bytes.toString(this.splitrow)); + return prepared; + } + long rid = getDaughterRegionIdTimestamp(hri); + this.hri_a = new HRegionInfo(hri.getTableDesc(), startKey, this.splitrow, + false, rid); + this.hri_b = new HRegionInfo(hri.getTableDesc(), this.splitrow, endKey, + false, rid); + prepared = true; + } finally { + if (!prepared) this.parent.splitsAndClosesLock.writeLock().unlock(); + } + return prepared; + } + + /** + * Calculate daughter regionid to use. + * @param hri Parent {@link HRegionInfo} + * @return Daughter region id (timestamp) to use. + */ + private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) { + long rid = EnvironmentEdgeManager.currentTimeMillis(); + // Regionid is timestamp. Can't be less than that of parent else will insert + // at wrong location in .META. (See HBASE-710). 
+ if (rid < hri.getRegionId()) { + LOG.warn("Clock skew; parent regions id is " + hri.getRegionId() + + " but current time here is " + rid); + rid = hri.getRegionId() + 1; + } + return rid; + } + + /** + * Run the transaction. + * @param or Object that can online/offline parent region. + * @throws IOException If thrown, transaction failed. Call {@link #rollback(OnlineRegions)} + * @return Regions created + * @see #rollback(OnlineRegions) + */ + public PairOfSameType execute(final OnlineRegions or) throws IOException { + return execute(or, or != null); + } + + /** + * Run the transaction. + * @param or Object that can online/offline parent region. Can be null (Tests + * will pass null). + * @param If true, update meta (set to false when testing). + * @throws IOException If thrown, transaction failed. Call {@link #rollback(OnlineRegions)} + * @return Regions created + * @see #rollback(OnlineRegions) + */ + PairOfSameType execute(final OnlineRegions or, final boolean updateMeta) + throws IOException { + LOG.info("Starting split of region " + this.parent); + + // We'll need one of these later but get it now because if we fail there + // is nothing to undo. + HTable t = null; + if (updateMeta) t = getTable(this.parent.getConf()); + + if (!this.parent.splitsAndClosesLock.writeLock().isHeldByCurrentThread()) { + throw new SplitAndCloseWriteLockNotHeld(); + } + this.journal.add(JournalEntry.SPLIT_AND_CLOSE_LOCK); + + createSplitDir(this.parent.getFilesystem(), this.splitdir); + this.journal.add(JournalEntry.CREATE_SPLIT_DIR); + + List hstoreFilesToSplit = this.parent.close(false); + this.journal.add(JournalEntry.CLOSED_PARENT_REGION); + + if (or != null) or.removeFromOnlineRegions(this.parent.getRegionInfo()); + this.journal.add(JournalEntry.OFFLINED_PARENT); + + // Ensure daughter dirs do not exist; if they do, somethings wrong. 
+ ensureDoesNotExist(this.parent.getFilesystem(), this.splitdir, + this.hri_a); + ensureDoesNotExist(this.parent.getFilesystem(), this.splitdir, + this.hri_b); + splitStoreFiles(this.splitdir, hstoreFilesToSplit); + // splitStoreFiles creates daughter region dirs under the parent splits dir + // Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will + // clean this up. + + // Log to the journal that we are creating region A, the first daughter + // region. We could fail halfway through. If we do, we could have left + // stuff in fs that needs cleanup -- a storefile or two. Thats why we + // add entry to journal BEFORE rather than AFTER the change. + this.journal.add(JournalEntry.STARTED_REGION_A_CREATION); + HRegion a = createDaughterRegion(this.hri_a); + + // Ditto + this.journal.add(JournalEntry.STARTED_REGION_B_CREATION); + HRegion b = createDaughterRegion(this.hri_b); + + Put editParentPut = createOfflineParentPut(); + if (t != null) t.put(editParentPut); + + // The is the point of no return. We are committed to the split now. Up to + // a failure editing parent in meta or a crash of the hosting regionserver, + // we could rollback (or, if crash, we could cleanup on redeploy) but now + // meta has been changed, we can only go forward. If the below last steps + // do not complete, repair has to be done by another agent. For example, + // basescanner, at least up till master rewrite, would add daughter rows if + // missing from meta. It could do this because the parent edit includes the + // daughter specs. In Bigtable paper, they have another mechanism where + // some feedback to the master somehow flags it that split is incomplete and + // needs fixup. Whatever the mechanism, its a TODO that we have some fixup. 
+ + // I looked at writing the put of the parent edit above out to the WAL log + // before changing meta with the notion that should we fail, then on replay + // the offlining of the parent and addition of daughters up into meta could + // be reinserted. The edits would have to be 'special' and given how our + // splits work, splitting by region, I think the replay would have to happen + // inside in the split code -- as soon as it saw one of these special edits, + // rather than write the edit out a file for the .META. region to replay or + // somehow, write it out to this regions edits file for it to handle on + // redeploy -- this'd be whacky, we'd be telling meta about a split during + // the deploy of the parent -- instead we'd have to play the edit inside + // in the split code somehow; this would involve a stop-the-splitting till + // meta had been edited which might hold up splitting a good while. + + // Finish up the meta edits. If these fail, another agent needs to do fixup + if (t != null) t.put(createDaughterPut(this.hri_a)); + if (t != null) t.put(createDaughterPut(this.hri_b)); + + // This should not fail because the HTable instance we are using is not + // running a buffer -- its immediately flushing its puts. + if (t != null) t.close(); + // Unlock if successful split. 
+ this.parent.splitsAndClosesLock.writeLock().unlock(); + + return new PairOfSameType(a, b); + } + + private static Path getSplitDir(final HRegion r) { + return new Path(r.getRegionDir(), SPLITDIR); + } + + /** + * @param fs Filesystem to use + * @param splitdir Directory to store temporary split data in + * @throws IOException + * @see #cleanupSplitDir(FileSystem, Path) + */ + private static void createSplitDir(final FileSystem fs, final Path splitdir) + throws IOException { + if(!fs.exists(splitdir)) { + if (!fs.mkdirs(splitdir)) throw new IOException("Failed create of " + splitdir); + } + } + + private static void cleanupSplitDir(final FileSystem fs, final Path splitdir) + throws IOException { + deleteDir(fs, splitdir); + } + + /** + * @param fs Filesystem to use + * @param dir Directory to delete + * @throws IOException Thrown if we fail to delete passed dir + */ + private static void deleteDir(final FileSystem fs, final Path dir) + throws IOException { + if (FSUtils.deleteDirectory(fs, dir)) { + throw new IOException("Failed delete of " + dir); + } + } + + private void splitStoreFiles(final Path splitdir, + final List hstoreFilesToSplit) + throws IOException { + if (hstoreFilesToSplit == null) { + // Could be null because close didn't succeed -- for now consider it fatal + throw new IOException("Close returned empty list of StoreFiles"); + } + + // Split each store file. 
+ for (StoreFile sf: hstoreFilesToSplit) { + splitStoreFile(sf, splitdir); + } + } + + private void splitStoreFile(final StoreFile sf, final Path splitdir) + throws IOException { + FileSystem fs = this.parent.getFilesystem(); + byte [] family = sf.getFamily(); + String encoded = this.hri_a.getEncodedName(); + Path storedir = Store.getStoreHomedir(splitdir, encoded, family); + StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom); + encoded = this.hri_b.getEncodedName(); + storedir = Store.getStoreHomedir(splitdir, encoded, family); + StoreFile.split(fs, storedir, sf, this.splitrow, Range.top); + } + + /** + * @param hri + * @return + * @throws IOException + * @see #cleanupDaughterRegion(FileSystem, Path, HRegionInfo) + */ + private HRegion createDaughterRegion(final HRegionInfo hri) + throws IOException { + FileSystem fs = this.parent.getFilesystem(); + Path regionDir = getSplitDirForDaughter(this.parent.getFilesystem(), + this.splitdir, hri); + HRegion r = HRegion.newHRegion(this.parent.getTableDir(), + this.parent.getLog(), fs, this.parent.getConf(), + hri, null); + HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir()); + return r; + } + + private static void cleanupDaughterRegion(final FileSystem fs, + final Path tabledir, final String encodedName) + throws IOException { + Path regiondir = HRegion.getRegionDir(tabledir, encodedName); + deleteDir(fs, regiondir); + } + + + /* + * Get the daughter directories in the splits dir. The splits dir is under + * the parent regions' directory. + * @param fs + * @param splitdir + * @param hri + * @return Path to daughter split dir. 
+ * @throws IOException + */ + private static Path getSplitDirForDaughter(final FileSystem fs, + final Path splitdir, final HRegionInfo hri) + throws IOException { + return new Path(splitdir, hri.getEncodedName()); + } + + private static void ensureDoesNotExist(final FileSystem fs, + final Path splitdir, final HRegionInfo hri) + throws IOException { + Path d = getSplitDirForDaughter(fs, splitdir, hri); + if (fs.exists(d)) { + throw new IOException("Cannot split; target file collision at " + d); + } + } + + /* + * @param r Parent region we want to edit. + * @return An HTable instance against the meta table that holds passed + * r; it has autoFlush enabled so we immediately send puts (No + * buffering enabled). + * @throws IOException + */ + private HTable getTable(final Configuration conf) throws IOException { + // When a region is split, the META table needs to updated if we're + // splitting a 'normal' region, and the ROOT table needs to be + // updated if we are splitting a META region. + HTable t = null; + if (this.parent.getRegionInfo().isMetaTable()) { + t = new HTable(conf, HConstants.ROOT_TABLE_NAME); + } else { + t = new HTable(conf, HConstants.META_TABLE_NAME); + } + // Flush puts as we send them -- no buffering. 
+ t.setAutoFlush(true); + return t; + } + + + private Put createOfflineParentPut() throws IOException { + HRegionInfo editedParentRegionInfo = + new HRegionInfo(this.parent.getRegionInfo()); + editedParentRegionInfo.setOffline(true); + editedParentRegionInfo.setSplit(true); + Put put = new Put(editedParentRegionInfo.getRegionName()); + put.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + Writables.getBytes(editedParentRegionInfo)); + put.add(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, + HConstants.EMPTY_BYTE_ARRAY); + put.add(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, + HConstants.EMPTY_BYTE_ARRAY); + put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITA_QUALIFIER, + Writables.getBytes(this.hri_a)); + put.add(HConstants.CATALOG_FAMILY, HConstants.SPLITB_QUALIFIER, + Writables.getBytes(this.hri_b)); + return put; + } + + private Put createDaughterPut(final HRegionInfo daughter) + throws IOException { + Put p = new Put(daughter.getRegionName()); + p.add(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, + Writables.getBytes(daughter)); + return p; + } + + /** + * @param or Object that can online/offline parent region. Can be passed null + * by unit tests. + * @throws IOException If thrown, rollback failed. Take drastic action. + */ + public void rollback(final OnlineRegions or) throws IOException { + if (!this.parent.splitsAndClosesLock.writeLock().isHeldByCurrentThread()) { + throw new SplitAndCloseWriteLockNotHeld(); + } + FileSystem fs = this.parent.getFilesystem(); + ListIterator iterator = this.journal.listIterator(); + while (iterator.hasPrevious()) { + JournalEntry je = iterator.previous(); + switch(je) { + case CREATE_SPLIT_DIR: + cleanupSplitDir(fs, this.splitdir); + break; + + case CLOSED_PARENT_REGION: + // So, this returns a seqid but if we just closed and then reopened, we + // should be ok. 
On close, we flushed using sequenceid obtained from + // hosting regionserver so no need to propagate the sequenceid returned + // out of initialize below up into regionserver as we normally do. + // TODO: Verify. + this.parent.initialize(); + break; + + case STARTED_REGION_A_CREATION: + cleanupDaughterRegion(fs, this.parent.getTableDir(), + this.hri_a.getEncodedName()); + break; + + case STARTED_REGION_B_CREATION: + cleanupDaughterRegion(fs, this.parent.getTableDir(), + this.hri_b.getEncodedName()); + break; + + case SPLIT_AND_CLOSE_LOCK: + this.parent.splitsAndClosesLock.writeLock().unlock(); + break; + + case OFFLINED_PARENT: + if (or != null) or.addToOnlineRegions(this.parent); + break; + + default: + throw new RuntimeException("Unhandled journal entry: " + je); + } + } + } + + /** + * Thrown if lock not held. + */ + @SuppressWarnings("serial") + public class SplitAndCloseWriteLockNotHeld extends IOException {} + + HRegionInfo getFirstDaughter() { + return hri_a; + } + + HRegionInfo getSecondDaughter() { + return hri_b; + } + + + /** + * Clean up any split detritus that may have been left around from previous + * split attempts. + * Call this method on initial region deploy. Cleans up any mess + * left by previous deploys of passed r region. + * @param r + * @throws IOException + */ + static void cleanupAnySplitDetritus(final HRegion r) throws IOException { + Path splitdir = getSplitDir(r); + FileSystem fs = r.getFilesystem(); + if (!fs.exists(splitdir)) return; + // Look at the splitdir. It could have the enccoded names of the daughter + // regions we tried to make. If so, hosting server died before we had + // chance to clean them out, ergo, the split did not complete. Clean up + // the dirs. 
+ FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs)); + for (int i = 0; i < daughters.length; i++) { + cleanupDaughterRegion(fs, r.getTableDir(), + daughters[i].getPath().getName()); + } + cleanupSplitDir(r.getFilesystem(), splitdir); + LOG.info("Cleaned up old failed split transaction detritus: " + splitdir); + } +} \ No newline at end of file diff --git a/src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java b/src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java index 43fa6dd..77c4506 100644 --- a/src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java +++ b/src/test/java/org/apache/hadoop/hbase/io/TestImmutableBytesWritable.java @@ -40,6 +40,13 @@ public class TestImmutableBytesWritable extends TestCase { new ImmutableBytesWritable(Bytes.toBytes("xxabc"), 2, 2).hashCode()); } + public void testSpecificCompare() { + ImmutableBytesWritable ibw1 = new ImmutableBytesWritable(new byte[]{0x0f}); + ImmutableBytesWritable ibw2 = new ImmutableBytesWritable(new byte[]{0x00, 0x00}); + ImmutableBytesWritable.Comparator c = new ImmutableBytesWritable.Comparator(); + assertFalse("ibw1 < ibw2", c.compare( ibw1, ibw2 ) < 0 ); + } + public void testComparison() throws Exception { runTests("aa", "b", -1); runTests("aa", "aa", 0); diff --git a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index 98bd3e5..e9bdb42 100644 --- a/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -50,9 +50,11 @@ import org.apache.hadoop.hbase.regionserver.HRegion.RegionScanner; import org.apache.hadoop.hbase.regionserver.wal.HLog; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper; +import org.apache.hadoop.hbase.util.FSUtils; import 
org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.PairOfSameType; import org.apache.hadoop.hbase.util.Threads; import com.google.common.base.Joiner; @@ -1247,54 +1249,38 @@ public class TestHRegion extends HBaseTestCase { public void testMerge() throws IOException { byte [] tableName = Bytes.toBytes("testtable"); byte [][] families = {fam1, fam2, fam3}; - HBaseConfiguration hc = initSplit(); //Setting up region String method = this.getName(); initHRegion(tableName, method, hc, families); - try { LOG.info("" + addContent(region, fam3)); region.flushcache(); byte [] splitRow = region.compactStores(); assertNotNull(splitRow); LOG.info("SplitRow: " + Bytes.toString(splitRow)); - HRegion [] regions = split(region, splitRow); + HRegion [] subregions = splitRegion(region, splitRow); try { // Need to open the regions. - // TODO: Add an 'open' to HRegion... don't do open by constructing - // instance. 
- for (int i = 0; i < regions.length; i++) { - regions[i] = openClosedRegion(regions[i]); + for (int i = 0; i < subregions.length; i++) { + openClosedRegion(subregions[i]); + subregions[i].compactStores(); } Path oldRegionPath = region.getRegionDir(); + Path oldRegion1 = subregions[0].getRegionDir(); + Path oldRegion2 = subregions[1].getRegionDir(); long startTime = System.currentTimeMillis(); - HRegion subregions [] = region.splitRegion(splitRow); - if (subregions != null) { - LOG.info("Split region elapsed time: " - + ((System.currentTimeMillis() - startTime) / 1000.0)); - assertEquals("Number of subregions", subregions.length, 2); - for (int i = 0; i < subregions.length; i++) { - subregions[i] = openClosedRegion(subregions[i]); - subregions[i].compactStores(); - } - - // Now merge it back together - Path oldRegion1 = subregions[0].getRegionDir(); - Path oldRegion2 = subregions[1].getRegionDir(); - startTime = System.currentTimeMillis(); - region = HRegion.mergeAdjacent(subregions[0], subregions[1]); - LOG.info("Merge regions elapsed time: " + - ((System.currentTimeMillis() - startTime) / 1000.0)); - fs.delete(oldRegion1, true); - fs.delete(oldRegion2, true); - fs.delete(oldRegionPath, true); - } + region = HRegion.mergeAdjacent(subregions[0], subregions[1]); + LOG.info("Merge regions elapsed time: " + + ((System.currentTimeMillis() - startTime) / 1000.0)); + fs.delete(oldRegion1, true); + fs.delete(oldRegion2, true); + fs.delete(oldRegionPath, true); LOG.info("splitAndMerge completed."); } finally { - for (int i = 0; i < regions.length; i++) { + for (int i = 0; i < subregions.length; i++) { try { - regions[i].close(); + subregions[i].close(); } catch (IOException e) { // Ignore. } @@ -1308,6 +1294,38 @@ public class TestHRegion extends HBaseTestCase { } } + /** + * @param parent Region to split. + * @param midkey Key to split around. + * @return The Regions we created. 
+ * @throws IOException + */ + HRegion [] splitRegion(final HRegion parent, final byte [] midkey) + throws IOException { + PairOfSameType result = null; + SplitTransaction st = new SplitTransaction(parent, midkey); + // If prepare does not return true, for some reason -- logged inside in + // the prepare call -- we are not ready to split just now. Just return. + if (!st.prepare()) return null; + try { + result = st.execute(null); + } catch (IOException ioe) { + try { + LOG.info("Running rollback of failed split of " + + parent.getRegionNameAsString() + "; " + ioe.getMessage()); + st.rollback(null); + LOG.info("Successful rollback of failed split of " + + parent.getRegionNameAsString()); + return null; + } catch (RuntimeException e) { + // If failed rollback, kill this server to avoid having a hole in table. + LOG.info("Failed rollback of failed split of " + + parent.getRegionNameAsString() + " -- aborting server", e); + } + } + return new HRegion [] {result.getFirst(), result.getSecond()}; + } + ////////////////////////////////////////////////////////////////////////////// // Scanner tests ////////////////////////////////////////////////////////////////////////////// @@ -2140,7 +2158,7 @@ public class TestHRegion extends HBaseTestCase { byte [] splitRow = region.compactStores(); assertNotNull(splitRow); LOG.info("SplitRow: " + Bytes.toString(splitRow)); - HRegion [] regions = split(region, splitRow); + HRegion [] regions = splitRegion(region, splitRow); try { // Need to open the regions. // TODO: Add an 'open' to HRegion... 
don't do open by constructing @@ -2180,7 +2198,7 @@ public class TestHRegion extends HBaseTestCase { for (int i = 0; i < regions.length; i++) { HRegion[] rs = null; if (midkeys[i] != null) { - rs = split(regions[i], midkeys[i]); + rs = splitRegion(regions[i], midkeys[i]); for (int j = 0; j < rs.length; j++) { sortedMap.put(Bytes.toString(rs[j].getRegionName()), openClosedRegion(rs[j])); @@ -2233,7 +2251,7 @@ public class TestHRegion extends HBaseTestCase { HRegion [] regions = null; try { - regions = region.splitRegion(Bytes.toBytes("" + splitRow)); + regions = splitRegion(region, Bytes.toBytes("" + splitRow)); //Opening the regions returned. for (int i = 0; i < regions.length; i++) { regions[i] = openClosedRegion(regions[i]); @@ -2784,15 +2802,6 @@ public class TestHRegion extends HBaseTestCase { } } - protected HRegion [] split(final HRegion r, final byte [] splitRow) - throws IOException { - // Assert can get mid key from passed region. - assertGet(r, fam3, splitRow); - HRegion [] regions = r.splitRegion(splitRow); - assertEquals(regions.length, 2); - return regions; - } - private HBaseConfiguration initSplit() { HBaseConfiguration conf = new HBaseConfiguration(); // Always compact if there is more than one store file. @@ -2827,6 +2836,11 @@ public class TestHRegion extends HBaseTestCase { } HRegionInfo info = new HRegionInfo(htd, null, null, false); Path path = new Path(DIR + callingMethod); + if (fs.exists(path)) { + if (!fs.delete(path, true)) { + throw new IOException("Failed delete of " + path); + } + } region = HRegion.createHRegion(info, path, conf); }