From cbcd0988ef06bb3f0fba9b06e4b0527793dcc08c Mon Sep 17 00:00:00 2001 From: Apekshit Sharma Date: Mon, 7 Nov 2016 13:59:21 -0800 Subject: [PATCH] - Moves most uses of createRegionAndWAL() to not use the path directly. Most testcases don't need to know the exact path of the region, but they use it to ensure isolation among test functions by cleaning up the directory in a @Before function. The solution here is to change the createRegionAndWAL() function to use a random dir on each invocation. - Of the tests changed, these are the failing ones: TestHRegion, TestRegionMergeTransaction, TestMergeTable. But it'll require another hard look to make sure that tests are not silently broken. E.g., it checks in a directory for no file, but since the directory has changed, it'll always pass. - Rename rfs to regionStorage Change-Id: Ib643326aa869a3551d381decad864936e7db6329 --- .../org/apache/hadoop/hbase/fs/RegionStorage.java | 6 +- .../apache/hadoop/hbase/regionserver/HRegion.java | 117 +++++++++------------ .../org/apache/hadoop/hbase/HBaseTestCase.java | 12 +-- .../apache/hadoop/hbase/HBaseTestingUtility.java | 31 +++--- .../hbase/client/TestIntraRowPagination.java | 6 +- .../coprocessor/TestCoprocessorInterface.java | 2 +- .../coprocessor/TestRegionObserverStacking.java | 14 +-- .../hbase/filter/TestColumnPrefixFilter.java | 9 +- .../hbase/filter/TestDependentColumnFilter.java | 3 +- .../org/apache/hadoop/hbase/filter/TestFilter.java | 9 +- .../hbase/filter/TestFilterFromRegionSide.java | 3 +- .../hbase/filter/TestInvocationRecordFilter.java | 3 +- .../filter/TestMultipleColumnPrefixFilter.java | 9 +- .../hadoop/hbase/io/encoding/TestPrefixTree.java | 8 +- .../io/encoding/TestSeekBeforeWithReverseScan.java | 3 +- .../hfile/TestScannerSelectionUsingKeyRange.java | 3 +- .../io/hfile/TestScannerSelectionUsingTTL.java | 3 +- .../hadoop/hbase/master/TestMasterFailover.java | 14 +-- .../hadoop/hbase/regionserver/TestBlocksRead.java | 31 ++---- .../regionserver/TestGetClosestAtOrBefore.java 
| 9 +- .../hadoop/hbase/regionserver/TestHRegion.java | 15 +-- .../regionserver/TestPerColumnFamilyFlush.java | 11 +- .../regionserver/TestRegionMergeTransaction.java | 4 +- .../hbase/regionserver/TestResettingCounters.java | 10 +- .../hadoop/hbase/regionserver/TestRowTooBig.java | 9 +- .../TestWalAndCompactingMemStoreFlush.java | 11 +- .../hbase/regionserver/wal/AbstractTestFSWAL.java | 3 +- .../apache/hadoop/hbase/util/TestMergeTable.java | 37 +++---- .../apache/hadoop/hbase/util/TestMergeTool.java | 4 +- 29 files changed, 153 insertions(+), 246 deletions(-) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java index 350e16c..1624332 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/RegionStorage.java @@ -252,14 +252,14 @@ public abstract class RegionStorage { LOG.debug("skipping override of the default FS, since the root container is not a LegacyPathIdentifier."); } - RegionStorage rfs = getInstance(conf, fs, rootContainer, regionInfo); + RegionStorage regionStorage = getInstance(conf, fs, rootContainer, regionInfo); if (bootstrap) { // TODO: are bootstrap and create two different things? 
// should switch to bootstrap & read-only // legacy region wants to recover the .regioninfo :( - rfs.bootstrap(); + regionStorage.bootstrap(); } - return rfs; + return regionStorage; } public static void destroy(Configuration conf, HRegionInfo regionInfo) throws IOException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index d0a4c94..b2bf546 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -290,7 +290,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi final AtomicLong compactionNumBytesCompacted = new AtomicLong(0L); private final WAL wal; - private final RegionStorage fs; + private final RegionStorage regionStorage; protected final Configuration conf; private final Configuration baseConf; private final int rowLockWaitDuration; @@ -601,53 +601,21 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * extensions. Instances of HRegion should be instantiated with the * {@link HRegion#createHRegion} or {@link HRegion#openHRegion} method. * - * @param tableDir qualified path of directory where region should be located, - * usually the table directory. * @param wal The WAL is the outbound log for any updates to the HRegion * The wal file is a logfile from the previous execution that's * custom-computed for this HRegion. The HRegionServer computes and sorts the * appropriate wal info for this HRegion. If there is a previous wal file * (implying that the HRegion has been written-to before), then read it from * the supplied path. - * @param fs is the filesystem. - * @param confParam is global configuration settings. - * @param regionInfo - HRegionInfo that describes the region - * is new), then read them from the supplied path. 
* @param htd the table descriptor * @param rsServices reference to {@link RegionServerServices} or null - * @deprecated Use other constructors. */ - @Deprecated - @VisibleForTesting - private HRegion(final Path tableDir, final WAL wal, final FileSystem fs, - final Configuration confParam, final HRegionInfo regionInfo, - final HTableDescriptor htd, final RegionServerServices rsServices) - throws IOException { - this(RegionStorage.open(confParam, fs, new LegacyPathIdentifier(tableDir), regionInfo, false), htd, wal, rsServices); - } - - /** - * HRegion constructor. This constructor should only be used for testing and - * extensions. Instances of HRegion should be instantiated with the - * {@link HRegion#createHRegion} or {@link HRegion#openHRegion} method. - * - * @param fs is the filesystem. - * @param wal The WAL is the outbound log for any updates to the HRegion - * The wal file is a logfile from the previous execution that's - * custom-computed for this HRegion. The HRegionServer computes and sorts the - * appropriate wal info for this HRegion. If there is a previous wal file - * (implying that the HRegion has been written-to before), then read it from - * the supplied path. - * @param confParam is global configuration settings. 
- * @param htd the table descriptor - * @param rsServices reference to {@link RegionServerServices} or null - */ - public HRegion(final RegionStorage rfs, final HTableDescriptor htd, final WAL wal, + public HRegion(final RegionStorage regionStorage, final HTableDescriptor htd, final WAL wal, final RegionServerServices rsServices) { - this(rfs, wal, rfs.getConfiguration(), htd, rsServices); + this(regionStorage, wal, regionStorage.getConfiguration(), htd, rsServices); } - private HRegion(final RegionStorage fs, final WAL wal, final Configuration confParam, + private HRegion(final RegionStorage regionStorage, final WAL wal, final Configuration confParam, final HTableDescriptor htd, final RegionServerServices rsServices) { if (htd == null) { throw new IllegalArgumentException("Need table descriptor"); @@ -658,7 +626,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } this.wal = wal; - this.fs = fs; + this.regionStorage = regionStorage; // 'conf' renamed to 'confParam' b/c we use this.conf in the constructor this.baseConf = confParam; @@ -827,14 +795,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Write HRI to a file in case we need to recover hbase:meta status.setStatus("Writing region info on filesystem"); - fs.checkRegionInfoOnFilesystem(); + regionStorage.checkRegionInfoOnFilesystem(); // Initialize all the HStores status.setStatus("Initializing all the Stores"); long maxSeqId = initializeStores(reporter, status); this.mvcc.advanceTo(maxSeqId); if (ServerRegionReplicaUtil.shouldReplayRecoveredEdits(this)) { - final StorageIdentifier regionContainer = this.fs.getRegionContainer(); + final StorageIdentifier regionContainer = this.regionStorage.getRegionContainer(); /* * TODO either move wal replay stuff to not rely on details from RegionStorage, * implement a WALStorage abstraction, or make a "Recovered Edits visitor". 
@@ -842,12 +810,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (regionContainer instanceof LegacyPathIdentifier) { // Recover any edits if available. maxSeqId = Math.max(maxSeqId, - replayRecoveredEditsIfAny(((LegacyPathIdentifier)this.fs.getRegionContainer()).path, maxSeqIdInStores, + replayRecoveredEditsIfAny(((LegacyPathIdentifier)this.regionStorage.getRegionContainer()).path, maxSeqIdInStores, reporter, status)); // Make sure mvcc is up to max. this.mvcc.advanceTo(maxSeqId); } else { - LOG.debug("Skipping check for recovered edits, because RegionStorage implementation '" + this.fs.getClass() + + LOG.debug("Skipping check for recovered edits, because RegionStorage implementation '" + this.regionStorage + .getClass() + "' doesn't return Paths for the region container."); } } @@ -860,14 +829,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (this.writestate.writesEnabled) { // Remove temporary data left over from old regions status.setStatus("Cleaning up temporary data from old regions"); - fs.cleanupTempContainer(); + regionStorage.cleanupTempContainer(); status.setStatus("Cleaning up detritus from prior splits"); // Get rid of any splits or merges that were lost in-progress. Clean out // these directories here on open. We may be opening a region that was // being split but we crashed in the middle of it all. - fs.cleanupAnySplitDetritus(); - fs.cleanupMergesContainer(); + regionStorage.cleanupAnySplitDetritus(); + regionStorage.cleanupMergesContainer(); } // Initialize split policy @@ -889,17 +858,17 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // is opened before recovery completes. 
So we add a safety bumper to avoid new sequence number // overlaps used sequence numbers if (this.writestate.writesEnabled) { - final StorageIdentifier regionContainer = this.fs.getRegionContainer(); + final StorageIdentifier regionContainer = this.regionStorage.getRegionContainer(); /* * TODO more WAL replay stuff that needs to get pulled out of the notion of region storage */ if (regionContainer instanceof LegacyPathIdentifier) { - nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), + nextSeqid = WALSplitter.writeRegionSequenceIdFile(this.regionStorage.getFileSystem(), ((LegacyPathIdentifier)regionContainer).path, nextSeqid, (this.recovering ? (this.flushPerChanges + 10000000) : 1)); } else { LOG.debug("Skipping region sequence id checkpointing, because RegionStorage implementation '" + - this.fs.getClass() + "' doesn't return Paths for the region container."); + this.regionStorage.getClass() + "' doesn't return Paths for the region container."); nextSeqid++; } } else { @@ -1051,7 +1020,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi WALUtil.writeRegionEventMarker(wal, getReplicationScope(), getRegionInfo(), regionEventDesc, mvcc); - final StorageIdentifier regionContainer = this.fs.getRegionContainer(); + final StorageIdentifier regionContainer = this.regionStorage.getRegionContainer(); /* * TODO more WAL stuff to move out of region storage */ @@ -1059,13 +1028,13 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Store SeqId in HDFS when a region closes // checking region folder exists is due to many tests which delete the table folder while a // table is still online - if (this.fs.getFileSystem().exists(((LegacyPathIdentifier)regionContainer).path)) { - WALSplitter.writeRegionSequenceIdFile(this.fs.getFileSystem(), ((LegacyPathIdentifier)regionContainer).path, + if (this.regionStorage.getFileSystem().exists(((LegacyPathIdentifier)regionContainer).path)) { + 
WALSplitter.writeRegionSequenceIdFile(this.regionStorage.getFileSystem(), ((LegacyPathIdentifier)regionContainer).path, mvcc.getReadPoint(), 0); } } else { LOG.debug("skipping WAL sequence ID checkpointing because the RegionStorage implementation, '" + - this.fs.getClass() + "' doesn't return Paths for the region container."); + this.regionStorage.getClass() + "' doesn't return Paths for the region container."); } } @@ -1175,7 +1144,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi @Override public HRegionInfo getRegionInfo() { - return this.fs.getRegionInfo(); + return this.regionStorage.getRegionInfo(); } /** @@ -1741,12 +1710,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi /** @return {@link FileSystem} being used by this region */ public FileSystem getFilesystem() { - return fs.getFileSystem(); + return regionStorage.getFileSystem(); } /** @return the {@link RegionStorage} used by this region */ public RegionStorage getRegionStorage() { - return this.fs; + return this.regionStorage; } @Override @@ -4011,7 +3980,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } long seqid = minSeqIdForTheRegion; - FileSystem fs = this.fs.getFileSystem(); + FileSystem fs = this.regionStorage.getFileSystem(); NavigableSet files = WALSplitter.getSplitEditFilesSorted(fs, regiondir); if (LOG.isDebugEnabled()) { LOG.debug("Found " + (files == null ? 
0 : files.size()) @@ -4113,7 +4082,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi String msg = "Replaying edits from " + edits; LOG.info(msg); MonitoredTask status = TaskMonitor.get().createStatus(msg); - FileSystem fs = this.fs.getFileSystem(); + FileSystem fs = this.regionStorage.getFileSystem(); status.setStatus("Opening recovered edits"); WAL.Reader reader = null; @@ -4944,7 +4913,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi for (String storeFile : storeFiles) { StoreFileInfo storeFileInfo = null; try { - storeFileInfo = fs.getStoreFileInfo(Bytes.toString(family), storeFile); + storeFileInfo = regionStorage.getStoreFileInfo(Bytes.toString(family), storeFile); store.bulkLoadHFile(storeFileInfo); } catch(FileNotFoundException ex) { LOG.warn(getRegionInfo().getEncodedName() + " : " @@ -5119,7 +5088,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi if (!RegionReplicaUtil.isDefaultReplica(this.getRegionInfo()) && Bytes.equals(encodedRegionName, - this.fs.getRegionInfoForFS().getEncodedNameAsBytes())) { + this.regionStorage.getRegionInfoForFS().getEncodedNameAsBytes())) { return; } @@ -5143,7 +5112,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi } /* - * @param fs * @param p File to check. * @return True if file was zero-length (and if so, we'll delete it in here). 
* @throws IOException @@ -5557,7 +5525,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi // Note the size of the store file try { - storeFilesSizes.put(name, this.fs.getStoreFileLen(commitedStoreFile)); + storeFilesSizes.put(name, this.regionStorage.getStoreFileLen(commitedStoreFile)); } catch (IOException e) { LOG.warn("Failed to find the size of hfile " + commitedStoreFile); storeFilesSizes.put(name, 0L); @@ -6343,10 +6311,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi return newHRegion(rfs, htd, wal, rsServices); } - private static HRegion newHRegion(RegionStorage rfs, HTableDescriptor htd, WAL wal, + private static HRegion newHRegion(RegionStorage regionStorage, HTableDescriptor htd, WAL wal, RegionServerServices rsServices) throws IOException { try { - Configuration conf = rfs.getConfiguration(); + Configuration conf = regionStorage.getConfiguration(); @SuppressWarnings("unchecked") Class regionClass = @@ -6356,7 +6324,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi regionClass.getConstructor(RegionStorage.class, HTableDescriptor.class, WAL.class, RegionServerServices.class); - return c.newInstance(rfs, htd, wal, rsServices); + return c.newInstance(regionStorage, htd, wal, rsServices); } catch (Throwable e) { // todo: what should I throw here? throw new IllegalStateException("Could not instantiate a region instance.", e); @@ -6367,7 +6335,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi * Convenience method creating new HRegions. Used by createTable. * * @param info Info for region to create. 
- * @param rootDir Root directory for HBase instance * @param wal shared WAL * @param initialize - true to initialize the region * @return new HRegion @@ -6379,8 +6346,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi LOG.info("creating HRegion " + info.getTable().getNameAsString() + " HTD == " + hTableDescriptor + " Table name == " + info.getTable().getNameAsString()); - RegionStorage rfs = RegionStorage.open(conf, info, true); - HRegion region = HRegion.newHRegion(rfs, hTableDescriptor, wal, null); + RegionStorage regionStorage = RegionStorage.open(conf, info, true); + HRegion region = HRegion.newHRegion(regionStorage, hTableDescriptor, wal, null); + if (initialize) region.initialize(null); + return region; + } + + @VisibleForTesting + public static HRegion createHRegion(final Configuration conf, + final HTableDescriptor hTableDescriptor, final HRegionInfo info, + final Path dir, final WAL wal, final boolean initialize) throws IOException { + LOG.info("creating HRegion " + info.getTable().getNameAsString() + + " HTD == " + hTableDescriptor + + " Table name == " + info.getTable().getNameAsString()); + RegionStorage regionStorage = RegionStorage.open(conf, FSUtils.getCurrentFileSystem(conf), + new LegacyPathIdentifier(dir), info, true); + HRegion region = HRegion.newHRegion(regionStorage, hTableDescriptor, wal, null); if (initialize) region.initialize(null); return region; } @@ -6552,7 +6533,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi */ HRegion createDaughterRegionFromSplits(final HRegionInfo hri) throws IOException { // Move the files from the temporary .splits to the final /table/region directory - fs.commitDaughterRegion(hri); + regionStorage.commitDaughterRegion(hri); // Create the daughter HRegion instance HRegion r = HRegion.newHRegion(this.getBaseConf(), @@ -6580,7 +6561,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi 
r.writeRequestsCount.add(this.getWriteRequestsCount() + region_b.getWriteRequestsCount()); - this.fs.commitMergedRegion(mergedRegionInfo); + this.regionStorage.commitMergedRegion(mergedRegionInfo); return r; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java index 153f36b..7dfe74a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestCase.java @@ -162,7 +162,7 @@ public abstract class HBaseTestCase extends TestCase { byte [] endKey, Configuration conf) throws IOException { HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey); - return HBaseTestingUtility.createRegionAndWAL(hri, testDir, conf, desc); + return testUtil.createRegionAndWAL(hri, desc, conf, testDir); } protected HRegion openClosedRegion(final HRegion closedRegion) @@ -171,7 +171,7 @@ public abstract class HBaseTestCase extends TestCase { } /** - * Create a table of name name with {@link COLUMNS} for + * Create a table of name name with {@link #COLUMNS} for * families. * @param name Name to give table. * @return Column descriptor. @@ -181,7 +181,7 @@ public abstract class HBaseTestCase extends TestCase { } /** - * Create a table of name name with {@link COLUMNS} for + * Create a table of name name with {@link #COLUMNS} for * families. * @param name Name to give table. * @param versions How many versions to allow per column. @@ -194,7 +194,7 @@ public abstract class HBaseTestCase extends TestCase { } /** - * Create a table of name name with {@link COLUMNS} for + * Create a table of name name with {@link #COLUMNS} for * families. * @param name Name to give table. * @param versions How many versions to allow per column. 
@@ -422,8 +422,8 @@ public abstract class HBaseTestCase extends TestCase { */ protected void createMetaRegion() throws IOException { FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(conf); - meta = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, testDir, - conf, fsTableDescriptors.get(TableName.META_TABLE_NAME)); + meta = testUtil.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, + fsTableDescriptors.get(TableName.META_TABLE_NAME), conf, testDir); } protected void closeRootAndMeta() throws IOException { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index 45d8aae..a6e571c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1827,7 +1827,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} when you're finished with it. */ public HRegion createLocalHRegion(HRegionInfo info, HTableDescriptor desc) throws IOException { - return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), desc); + return createRegionAndWAL(info, desc); } /** @@ -2312,11 +2312,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { * Create rows in hbase:meta for regions of the specified table with the specified * start keys. The first startKey should be a 0 length byte array if you * want to form a proper range of regions. - * @param conf - * @param htd - * @param startKeys * @return list of region info for regions added to meta - * @throws IOException */ public List createMultiRegionsInMeta(final Configuration conf, final HTableDescriptor htd, byte [][] startKeys) @@ -2357,22 +2353,27 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { /** * Create a region with it's own WAL. 
Be sure to call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources. + * Uses random directory on each call. */ - public static HRegion createRegionAndWAL(final HRegionInfo info, final Path rootDir, - final Configuration conf, final HTableDescriptor htd) throws IOException { - return createRegionAndWAL(info, rootDir, conf, htd, true); + public HRegion createRegionAndWAL(final HRegionInfo info, final HTableDescriptor htd) + throws IOException { + return createRegionAndWAL(info, htd, conf); + } + + public HRegion createRegionAndWAL(final HRegionInfo info, final HTableDescriptor htd, + Configuration conf) throws IOException { + // TODO (bug): if local fs, returns same dir. + return createRegionAndWAL(info, htd, conf, getNewDataTestDirOnTestFS()); } /** * Create a region with it's own WAL. Be sure to call * {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} to clean up all resources. */ - public static HRegion createRegionAndWAL(final HRegionInfo info, final Path rootDir, - final Configuration conf, final HTableDescriptor htd, boolean initialize) - throws IOException { - WAL wal = createWal(conf, rootDir, info); -// return HRegion.createHRegion(conf, rootDir, htd, info, wal, initialize); - return null; + public HRegion createRegionAndWAL(final HRegionInfo info, final HTableDescriptor htd, + Configuration conf, Path dir) throws IOException { + WAL wal = createWal(conf, dir, info); + return HRegion.createHRegion(conf, htd, info, dir, wal, true); } /** @@ -3842,7 +3843,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(TableName.valueOf(tableName), null, null, false); - return createRegionAndWAL(info, getDataTestDir(), getConfiguration(), htd); + return createRegionAndWAL(info, htd); } public void setFileSystemURI(String fsURI) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java index 1f6dc98..6a792e3 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestIntraRowPagination.java @@ -44,12 +44,9 @@ public class TestIntraRowPagination { /** * Test from client side for scan with maxResultPerCF set - * - * @throws Exception */ @Test public void testScanLimitAndOffset() throws Exception { - //byte [] TABLE = HTestConst.DEFAULT_TABLE_BYTES; byte [][] ROWS = HTestConst.makeNAscii(HTestConst.DEFAULT_ROW_BYTES, 2); byte [][] FAMILIES = HTestConst.makeNAscii(HTestConst.DEFAULT_CF_BYTES, 3); byte [][] QUALIFIERS = HTestConst.makeNAscii(HTestConst.DEFAULT_QUALIFIER_BYTES, 10); @@ -60,8 +57,7 @@ public class TestIntraRowPagination { HColumnDescriptor hcd = new HColumnDescriptor(family); htd.addFamily(hcd); } - HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), htd); + HRegion region = TEST_UTIL.createRegionAndWAL(info, htd); try { Put put; Scan scan; diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java index b2ef1bd..402c180 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java @@ -455,7 +455,7 @@ public class TestCoprocessorInterface { } HRegionInfo info = new HRegionInfo(tableName, null, null, false); Path path = new Path(DIR + callingMethod); - Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); + Region r = TEST_UTIL.createRegionAndWAL(info, htd, conf, path); // this following piece is a hack. 
RegionCoprocessorHost host = new RegionCoprocessorHost(r, null, conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java index 723edcb..80d9fc1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverStacking.java @@ -44,9 +44,7 @@ import org.junit.experimental.categories.Category; @Category({CoprocessorTests.class, SmallTests.class}) public class TestRegionObserverStacking extends TestCase { - private static HBaseTestingUtility TEST_UTIL - = new HBaseTestingUtility(); - static final Path DIR = TEST_UTIL.getDataTestDir(); + private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); public static class ObserverA extends BaseRegionObserver { long id; @@ -94,15 +92,14 @@ public class TestRegionObserverStacking extends TestCase { } } - HRegion initHRegion (byte [] tableName, String callingMethod, - Configuration conf, byte [] ... families) throws IOException { + HRegion initHRegion(byte [] tableName, Configuration conf, byte [] ... families) + throws IOException { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); for(byte [] family : families) { htd.addFamily(new HColumnDescriptor(family)); } HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - Path path = new Path(DIR + callingMethod); - HRegion r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); + HRegion r = TEST_UTIL.createRegionAndWAL(info, htd); // this following piece is a hack. currently a coprocessorHost // is secretly loaded at OpenRegionHandler. 
we don't really // start a region server here, so just manually create cphost @@ -119,8 +116,7 @@ public class TestRegionObserverStacking extends TestCase { byte[][] FAMILIES = new byte[][] { A } ; Configuration conf = HBaseConfiguration.create(); - HRegion region = initHRegion(TABLE, getClass().getName(), - conf, FAMILIES); + HRegion region = initHRegion(TABLE, conf, FAMILIES); RegionCoprocessorHost h = region.getCoprocessorHost(); h.load(ObserverA.class, Coprocessor.PRIORITY_HIGHEST, conf); h.load(ObserverB.class, Coprocessor.PRIORITY_USER, conf); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java index 828842d..04db22e 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPrefixFilter.java @@ -49,8 +49,7 @@ import org.junit.experimental.categories.Category; @Category({FilterTests.class, SmallTests.class}) public class TestColumnPrefixFilter { - private final static HBaseTestingUtility TEST_UTIL = new - HBaseTestingUtility(); + private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @Test public void testColumnPrefixFilter() throws IOException { @@ -58,8 +57,7 @@ public class TestColumnPrefixFilter { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestColumnPrefixFilter")); htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3)); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), htd); + HRegion region = TEST_UTIL.createRegionAndWAL(info, htd); try { List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -121,8 +119,7 @@ public class TestColumnPrefixFilter { HTableDescriptor 
htd = new HTableDescriptor(TableName.valueOf("TestColumnPrefixFilter")); htd.addFamily((new HColumnDescriptor(family)).setMaxVersions(3)); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), htd); + HRegion region = TEST_UTIL.createRegionAndWAL(info, htd); try { List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java index 3a635b8..bd409d9 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestDependentColumnFilter.java @@ -87,8 +87,7 @@ public class TestDependentColumnFilter { hcd1.setMaxVersions(3); htd.addFamily(hcd1); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - this.region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), htd); + this.region = TEST_UTIL.createRegionAndWAL(info, htd); addData(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java index 6b22164..8cd7b99 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilter.java @@ -144,8 +144,7 @@ public class TestFilter { htd.addFamily(new HColumnDescriptor(NEW_FAMILIES[0])); htd.addFamily(new HColumnDescriptor(NEW_FAMILIES[1])); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - this.region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - 
TEST_UTIL.getConfiguration(), htd); + this.region = TEST_UTIL.createRegionAndWAL(info, htd); // Insert first half for(byte [] ROW : ROWS_ONE) { @@ -1493,8 +1492,7 @@ public class TestFilter { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestFilter")); htd.addFamily(new HColumnDescriptor(family)); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - Region testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), htd); + Region testRegion = TEST_UTIL.createRegionAndWAL(info, htd); for(int i=0; i<5; i++) { Put p = new Put(Bytes.toBytes((char)('a'+i) + "row")); @@ -2054,8 +2052,7 @@ public class TestFilter { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("testNestedFilterListWithSCVF")); htd.addFamily(new HColumnDescriptor(FAMILIES[0])); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - Region testRegion = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), htd); + Region testRegion = TEST_UTIL.createRegionAndWAL(info, htd); for(int i=0; i<10; i++) { Put p = new Put(Bytes.toBytes("row" + i)); p.setDurability(Durability.SKIP_WAL); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java index 0a287ce..4d87696 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java @@ -83,8 +83,7 @@ public class TestFilterFromRegionSide { htd.addFamily(hcd); } HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - REGION = HBaseTestingUtility - .createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), TEST_UTIL.getConfiguration(), htd); + REGION = TEST_UTIL.createRegionAndWAL(info, htd); for(Put 
put:createPuts(ROWS, FAMILIES, QUALIFIERS, VALUE)){ REGION.put(put); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java index 8291e52..32cbb4b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestInvocationRecordFilter.java @@ -69,8 +69,7 @@ public class TestInvocationRecordFilter { TableName.valueOf(TABLE_NAME_BYTES)); htd.addFamily(new HColumnDescriptor(FAMILY_NAME_BYTES)); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - this.region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), - TEST_UTIL.getConfiguration(), htd); + this.region = TEST_UTIL.createRegionAndWAL(info, htd); Put put = new Put(ROW_BYTES); for (int i = 0; i < 10; i += 2) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java index 7b700b7..42fa7e2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestMultipleColumnPrefixFilter.java @@ -61,8 +61,7 @@ public class TestMultipleColumnPrefixFilter { htd.addFamily(hcd); // HRegionInfo info = new HRegionInfo(htd, null, null, false); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL. 
- getDataTestDir(), TEST_UTIL.getConfiguration(), htd); + HRegion region = TEST_UTIL.createRegionAndWAL(info, htd); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -128,8 +127,7 @@ public class TestMultipleColumnPrefixFilter { hcd2.setMaxVersions(3); htd.addFamily(hcd2); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL. - getDataTestDir(), TEST_UTIL.getConfiguration(), htd); + HRegion region = TEST_UTIL.createRegionAndWAL(info, htd); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); @@ -195,8 +193,7 @@ public class TestMultipleColumnPrefixFilter { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("TestMultipleColumnPrefixFilter")); htd.addFamily(new HColumnDescriptor(family)); HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - HRegion region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL. 
- getDataTestDir(), TEST_UTIL.getConfiguration(), htd); + HRegion region = TEST_UTIL.createRegionAndWAL(info, htd); List rows = generateRandomWords(100, "row"); List columns = generateRandomWords(10000, "column"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java index e31a73b..3d8d971 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestPrefixTree.java @@ -26,7 +26,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -49,6 +48,7 @@ import org.junit.experimental.categories.Category; @Category({ IOTests.class, SmallTests.class }) public class TestPrefixTree { + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static final String row4 = "a-b-B-2-1402397300-1402416535"; private static final byte[] row4_bytes = Bytes.toBytes(row4); @@ -63,7 +63,6 @@ public class TestPrefixTree { private final static byte[] qual1 = Bytes.toBytes("qf_1"); private final static byte[] qual2 = Bytes.toBytes("qf_2"); - private final HBaseTestingUtility testUtil = new HBaseTestingUtility(); private Region region; @@ -73,14 +72,13 @@ public class TestPrefixTree { HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(fam).setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE)); HRegionInfo info = new HRegionInfo(tableName, null, null, false); - Path path = testUtil.getDataTestDir(getClass().getSimpleName()); - region = HBaseTestingUtility.createRegionAndWAL(info, path, testUtil.getConfiguration(), htd); + region = TEST_UTIL.createRegionAndWAL(info, htd); } @After public void 
tearDown() throws Exception { HBaseTestingUtility.closeRegionAndWAL(region); - testUtil.cleanupTestDir(); + TEST_UTIL.cleanupTestDir(); } @Test diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java index 2826694..2a7b30c 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestSeekBeforeWithReverseScan.java @@ -59,8 +59,7 @@ public class TestSeekBeforeWithReverseScan { HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(cfName).setDataBlockEncoding(DataBlockEncoding.FAST_DIFF)); HRegionInfo info = new HRegionInfo(tableName, null, null, false); - Path path = testUtil.getDataTestDir(getClass().getSimpleName()); - region = HBaseTestingUtility.createRegionAndWAL(info, path, testUtil.getConfiguration(), htd); + region = testUtil.createRegionAndWAL(info, htd); } @After diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java index 9c6bb38..7b0b233 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java @@ -99,8 +99,7 @@ public class TestScannerSelectionUsingKeyRange { HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(TABLE); - Region region = HBaseTestingUtility.createRegionAndWAL(info, TEST_UTIL.getDataTestDir(), conf, - htd); + Region region = TEST_UTIL.createRegionAndWAL(info, htd, conf); for (int iFile = 0; iFile < NUM_FILES; ++iFile) { for (int iRow = 0; iRow < NUM_ROWS; ++iRow) { 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java index 08b259d..13fbc86 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java @@ -106,8 +106,7 @@ public class TestScannerSelectionUsingTTL { HTableDescriptor htd = new HTableDescriptor(TABLE); htd.addFamily(hcd); HRegionInfo info = new HRegionInfo(TABLE); - Region region = HBaseTestingUtility.createRegionAndWAL(info, - TEST_UTIL.getDataTestDir(info.getEncodedName()), conf, htd); + Region region = TEST_UTIL.createRegionAndWAL(info, htd, conf); long ts = EnvironmentEdgeManager.currentTime(); long version = 0; //make sure each new set of Put's have a new ts diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java index a20327d..1f6f40b 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java @@ -18,6 +18,6 @@ */ package org.apache.hadoop.hbase.master; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -62,10 +62,9 @@ import org.junit.experimental.categories.Category; public class TestMasterFailover { private static final Log LOG = LogFactory.getLog(TestMasterFailover.class); - HRegion createRegion(final HRegionInfo hri, final Path rootdir, final Configuration c, - final HTableDescriptor htd) + HRegion createRegion(HBaseTestingUtility hbtu, final HRegionInfo hri, final HTableDescriptor htd) throws IOException { - HRegion r = 
HBaseTestingUtility.createRegionAndWAL(hri, rootdir, c, htd); + HRegion r = hbtu.createRegionAndWAL(hri, htd); // The above call to create a region will create an wal file. Each // log file create will also create a running thread to do syncing. We need // to close out this log else we will have a running thread trying to sync @@ -233,7 +233,7 @@ public class TestMasterFailover { fstd.createTableDescriptor(offlineTable); HRegionInfo hriOffline = new HRegionInfo(offlineTable.getTableName(), null, null); - createRegion(hriOffline, rootdir, conf, offlineTable); + createRegion(TEST_UTIL, hriOffline, offlineTable); MetaTableAccessor.addRegionToMeta(master.getConnection(), hriOffline); log("Regions in hbase:meta and namespace have been created"); @@ -263,7 +263,7 @@ public class TestMasterFailover { stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState); HRegionInfo failedClose = new HRegionInfo(offlineTable.getTableName(), null, null); - createRegion(failedClose, rootdir, conf, offlineTable); + createRegion(TEST_UTIL, failedClose, offlineTable); MetaTableAccessor.addRegionToMeta(master.getConnection(), failedClose); oldState = new RegionState(failedClose, State.PENDING_CLOSE); @@ -271,7 +271,7 @@ public class TestMasterFailover { stateStore.updateRegionState(HConstants.NO_SEQNUM, newState, oldState); HRegionInfo failedOpen = new HRegionInfo(offlineTable.getTableName(), null, null); - createRegion(failedOpen, rootdir, conf, offlineTable); + createRegion(TEST_UTIL, failedOpen, offlineTable); MetaTableAccessor.addRegionToMeta(master.getConnection(), failedOpen); // Simulate a region transitioning to failed open when the region server reports the @@ -282,7 +282,7 @@ public class TestMasterFailover { HRegionInfo failedOpenNullServer = new HRegionInfo(offlineTable.getTableName(), null, null); LOG.info("Failed open NUll server " + failedOpenNullServer.getEncodedName()); - createRegion(failedOpenNullServer, rootdir, conf, offlineTable); + createRegion(TEST_UTIL, 
failedOpenNullServer, offlineTable); MetaTableAccessor.addRegionToMeta(master.getConnection(), failedOpenNullServer); // Simulate a region transitioning to failed open when the master couldn't find a plan for diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java index edd7847..60b087a 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java @@ -27,7 +27,6 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -59,14 +58,13 @@ public class TestBlocksRead { private static final Log LOG = LogFactory.getLog(TestBlocksRead.class); @Rule public TestName testName = new TestName(); - static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL, + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static BlockCache blockCache; + + private static final BloomType[] BLOOM_TYPE = new BloomType[] { BloomType.ROWCOL, BloomType.ROW, BloomType.NONE }; - private static BlockCache blockCache; Region region = null; - private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private final String DIR = TEST_UTIL.getDataTestDir("TestBlocksRead").toString(); - private Configuration conf = TEST_UTIL.getConfiguration(); @BeforeClass public static void setUp() throws Exception { @@ -81,15 +79,9 @@ public class TestBlocksRead { /** * Callers must afterward call {@link HBaseTestingUtility#closeRegionAndWAL(HRegion)} - * @param tableName - * @param callingMethod - * @param conf - * @param family - * @throws 
IOException * @return created and initialized region. */ - private Region initHRegion(byte[] tableName, String callingMethod, - Configuration conf, String family) throws IOException { + private Region initHRegion(byte[] tableName, String family) throws IOException { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(tableName)); HColumnDescriptor familyDesc; for (int i = 0; i < BLOOM_TYPE.length; i++) { @@ -101,9 +93,8 @@ public class TestBlocksRead { } HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false); - Path path = new Path(DIR + callingMethod); - Region r = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); - blockCache = new CacheConfig(conf).getBlockCache(); + Region r = TEST_UTIL.createRegionAndWAL(info, htd); + blockCache = new CacheConfig(TEST_UTIL.getConfiguration()).getBlockCache(); return r; } @@ -213,7 +204,7 @@ public class TestBlocksRead { byte[] TABLE = Bytes.toBytes("testBlocksRead"); String FAMILY = "cf1"; Cell kvs[]; - this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY); + this.region = initHRegion(TABLE, FAMILY); try { putData(FAMILY, "row", "col1", 1); @@ -269,7 +260,7 @@ public class TestBlocksRead { byte[] TABLE = Bytes.toBytes("testLazySeekBlocksRead"); String FAMILY = "cf1"; Cell kvs[]; - this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY); + this.region = initHRegion(TABLE, FAMILY); try { // File 1 @@ -376,7 +367,7 @@ public class TestBlocksRead { byte [] TABLE = Bytes.toBytes("testBlocksReadWhenCachingDisabled"); String FAMILY = "cf1"; - this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY); + this.region = initHRegion(TABLE, FAMILY); try { putData(FAMILY, "row", "col1", 1); @@ -420,7 +411,7 @@ public class TestBlocksRead { byte[] TABLE = Bytes.toBytes("testLazySeekBlocksReadWithDelete"); String FAMILY = "cf1"; Cell kvs[]; - this.region = initHRegion(TABLE, testName.getMethodName(), conf, FAMILY); + this.region = initHRegion(TABLE, 
FAMILY); try { deleteFamily(FAMILY, "row", 200); for (int i = 0; i < 100; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java index 7ee3f0b..3a4ae23 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestGetClosestAtOrBefore.java @@ -25,8 +25,6 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseTestingUtility; @@ -72,17 +70,14 @@ public class TestGetClosestAtOrBefore { private static final byte[] T40 = Bytes.toBytes("040"); private static HBaseTestingUtility UTIL = new HBaseTestingUtility(); - private static Configuration conf = UTIL.getConfiguration(); @Test public void testUsingMetaAndBinary() throws IOException { - FileSystem filesystem = FileSystem.get(conf); - Path rootdir = UTIL.getDataTestDirOnTestFS(); // Up flush size else we bind up when we use default catalog flush of 16k. UTIL.getMetaTableDescriptor().setMemStoreFlushSize(64 * 1024 * 1024); - Region mr = HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, - rootdir, this.conf, UTIL.getMetaTableDescriptor()); + Region mr = UTIL.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, + UTIL.getMetaTableDescriptor()); try { // Write rows for three tables 'A', 'B', and 'C'. 
for (char c = 'A'; c < 'D'; c++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java index c9b4217..d168ea5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java @@ -6042,9 +6042,7 @@ public class TestHRegion { HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); // open the region w/o rss and wal and flush some files - HRegion region = - HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL - .getConfiguration(), htd); + HRegion region = TEST_UTIL.createRegionAndWAL(hri, htd); assertNotNull(region); // create a file in fam1 for the region before opening in OpenRegionHandler @@ -6116,8 +6114,7 @@ public class TestHRegion { HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(fam1)); HRegionInfo info = new HRegionInfo(tableName, null, null, false); - Path path = TEST_UTIL.getDataTestDir(getClass().getSimpleName()); - region = HBaseTestingUtility.createRegionAndWAL(info, path, TEST_UTIL.getConfiguration(), htd); + region = TEST_UTIL.createRegionAndWAL(info, htd); Put put = new Put(Bytes.toBytes("a-b-0-0")); put.addColumn(fam1, qual1, Bytes.toBytes("c1-value")); region.put(put); @@ -6146,9 +6143,7 @@ public class TestHRegion { HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY); // open the region w/o rss and wal and flush some files - HRegion region = - HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDataTestDir(), TEST_UTIL - .getConfiguration(), htd); + HRegion region = TEST_UTIL.createRegionAndWAL(hri, htd); assertNotNull(region); // create a file in fam1 for the region before opening in OpenRegionHandler @@ -6373,9 +6368,9 @@ public class TestHRegion { Configuration conf = new Configuration(TEST_UTIL.getConfiguration()); 
conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); - HRegion region = HBaseTestingUtility.createRegionAndWAL(new HRegionInfo(htd.getTableName(), + HRegion region = TEST_UTIL.createRegionAndWAL(new HRegionInfo(htd.getTableName(), HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY), - TEST_UTIL.getDataTestDir(), conf, htd); + htd, conf); assertNotNull(region); try { long now = EnvironmentEdgeManager.currentTime(); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java index 6bfaa59..b783322 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java @@ -69,8 +69,6 @@ public class TestPerColumnFamilyFlush { private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); - private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion"); - public static final TableName TABLENAME = TableName.valueOf("TestPerColumnFamilyFlush", "t1"); public static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2"), @@ -82,14 +80,13 @@ public class TestPerColumnFamilyFlush { public static final byte[] FAMILY3 = FAMILIES[2]; - private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException { + private HRegion initHRegion(Configuration conf) throws IOException { HTableDescriptor htd = new HTableDescriptor(TABLENAME); for (byte[] family : FAMILIES) { htd.addFamily(new HColumnDescriptor(family)); } HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false); - Path path = new Path(DIR, callingMethod); - return HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); + return TEST_UTIL.createRegionAndWAL(info, htd, conf); } // A helper function to create puts. 
@@ -130,7 +127,7 @@ public class TestPerColumnFamilyFlush { conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 100 * 1024); // Intialize the region - Region region = initHRegion("testSelectiveFlushWhenEnabled", conf); + Region region = initHRegion(conf); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { region.put(createPut(1, i)); @@ -273,7 +270,7 @@ public class TestPerColumnFamilyFlush { conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushAllStoresPolicy.class.getName()); // Intialize the HRegion - HRegion region = initHRegion("testSelectiveFlushWhenNotEnabled", conf); + HRegion region = initHRegion(conf); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { region.put(createPut(1, i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java index 57d9365..0b60ed5 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java @@ -443,8 +443,8 @@ public class TestRegionMergeTransaction { HColumnDescriptor hcd = new HColumnDescriptor(CF); htd.addFamily(hcd); HRegionInfo hri = new HRegionInfo(htd.getTableName(), startrow, endrow); - HRegion a = HBaseTestingUtility.createRegionAndWAL(hri, testdir, - TEST_UTIL.getConfiguration(), htd); + HRegion a = TEST_UTIL.createRegionAndWAL(hri, htd, TEST_UTIL.getConfiguration(), + testdir); HBaseTestingUtility.closeRegionAndWAL(a); // return HRegion.openHRegion(testdir, hri, htd, // wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()), diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java index 3e02243..03e7814 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestResettingCounters.java @@ -41,7 +41,6 @@ public class TestResettingCounters { @Test public void testResettingCounters() throws Exception { - HBaseTestingUtility htu = new HBaseTestingUtility(); Configuration conf = htu.getConfiguration(); FileSystem fs = FileSystem.get(conf); @@ -62,14 +61,7 @@ for (byte [] family : families) htd.addFamily(new HColumnDescriptor(family)); HRegionInfo hri = new HRegionInfo(htd.getTableName(), null, null, false); - String testDir = htu.getDataTestDir() + "/TestResettingCounters/"; - Path path = new Path(testDir); - if (fs.exists(path)) { - if (!fs.delete(path, true)) { - throw new IOException("Failed delete of " + path); - } - } - Region region = HBaseTestingUtility.createRegionAndWAL(hri, path, conf, htd); + Region region = htu.createRegionAndWAL(hri, htd); try { Increment odd = new Increment(rows[0]); odd.setDurability(Durability.SKIP_WAL); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java index 4d3a1c3..4f06f11 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRowTooBig.java @@ -20,6 +20,6 @@ package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; @@ -41,7 +41,6 @@ import java.io.IOException; @Category({RegionServerTests.class, MediumTests.class}) public class 
TestRowTooBig { private final static HBaseTestingUtility HTU = HBaseTestingUtility.createLocalHTU(); - private static Path rootRegionDir; private static final HTableDescriptor TEST_HTD = new HTableDescriptor(TableName.valueOf(TestRowTooBig.class.getSimpleName())); @@ -50,7 +50,6 @@ public class TestRowTooBig { HTU.startMiniCluster(); HTU.getConfiguration().setLong(HConstants.TABLE_MAX_ROWSIZE_KEY, 10 * 1024 * 1024L); - rootRegionDir = HTU.getDataTestDirOnTestFS("TestRowTooBig"); } @AfterClass @@ -85,8 +84,7 @@ public class TestRowTooBig { final HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW); - Region region = - HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), htd); + Region region = HTU.createRegionAndWAL(hri, htd); try { // Add 5 cells to memstore for (int i = 0; i < 5 ; i++) { @@ -132,8 +130,7 @@ public class TestRowTooBig { final HRegionInfo hri = new HRegionInfo(htd.getTableName(), HConstants.EMPTY_END_ROW, HConstants.EMPTY_END_ROW); - Region region = - HBaseTestingUtility.createRegionAndWAL(hri, rootRegionDir, HTU.getConfiguration(), htd); + Region region = HTU.createRegionAndWAL(hri, htd); try { // Add to memstore for (int i = 0; i < 10; i++) { diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java index 74826b0..020b854 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java @@ -63,7 +63,7 @@ public class TestWalAndCompactingMemStoreFlush { public static final byte[] FAMILY2 = FAMILIES[1]; public static final byte[] FAMILY3 = FAMILIES[2]; - private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException { + 
private HRegion initHRegion(Configuration conf) throws IOException { int i=0; HTableDescriptor htd = new HTableDescriptor(TABLENAME); for (byte[] family : FAMILIES) { @@ -75,8 +75,7 @@ public class TestWalAndCompactingMemStoreFlush { } HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false); - Path path = new Path(DIR, callingMethod); - return HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd); + return TEST_UTIL.createRegionAndWAL(info, htd, conf); } // A helper function to create puts. @@ -132,7 +131,7 @@ public class TestWalAndCompactingMemStoreFlush { conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25); // Intialize the region - Region region = initHRegion("testSelectiveFlushWhenEnabled", conf); + Region region = initHRegion(conf); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { @@ -385,7 +384,7 @@ public class TestWalAndCompactingMemStoreFlush { conf.setInt("hbase.hregion.compacting.memstore.type",1); // Intialize the region - Region region = initHRegion("testSelectiveFlushWhenEnabled", conf); + Region region = initHRegion(conf); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { @@ -629,7 +628,7 @@ public class TestWalAndCompactingMemStoreFlush { conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5); // Intialize the HRegion - HRegion region = initHRegion("testSelectiveFlushWhenNotEnabled", conf); + HRegion region = initHRegion(conf); // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3 for (int i = 1; i <= 1200; i++) { region.put(createPut(1, i)); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java index 3420635..d4bf34f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java @@ -354,8 +354,7 @@ public abstract class AbstractTestFSWAL { final byte[] rowName = tableName.getName(); final HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("f")); - HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, TEST_UTIL.getDefaultRootDirPath(), - TEST_UTIL.getConfiguration(), htd); + HRegion r = TEST_UTIL.createRegionAndWAL(hri, htd); HBaseTestingUtility.closeRegionAndWAL(r); final int countPerFamily = 10; final AtomicBoolean goslow = new AtomicBoolean(false); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java index 661af14..e769ffa 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java @@ -27,7 +27,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.*; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; @@ -65,7 +64,8 @@ public class TestMergeTable { * up mini cluster, disables the hand-made table and starts in on merging. * @throws Exception */ - @Test (timeout=300000) public void testMergeTable() throws Exception { + @Test (timeout=300000) + public void testMergeTable() throws Exception { // Table we are manually creating offline. HTableDescriptor desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(Bytes.toBytes("test"))); desc.addFamily(new HColumnDescriptor(COLUMN_NAME)); @@ -76,14 +76,6 @@ public class TestMergeTable { UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0); // Startup hdfs. 
Its in here we'll be putting our manually made regions. UTIL.startMiniDFSCluster(1); - // Create hdfs hbase rootdir. - Path rootdir = UTIL.createRootDir(); - FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - if (fs.exists(rootdir)) { - if (fs.delete(rootdir, true)) { - LOG.info("Cleaned up existing " + rootdir); - } - } // Now create three data regions: The first is too large to merge since it // will be > 64 MB in size. The second two will be smaller and will be @@ -96,16 +88,15 @@ public class TestMergeTable { // Create regions and populate them at same time. Create the tabledir // for them first. - new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc); HRegion [] regions = { - createRegion(desc, null, row_70001, 1, 70000, rootdir), - createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir), - createRegion(desc, row_80001, null, 80001, 11000, rootdir) + createRegion(desc, null, row_70001, 1, 70000), + createRegion(desc, row_70001, row_80001, 70001, 10000), + createRegion(desc, row_80001, null, 80001, 11000) }; // Now create the root and meta regions and insert the data regions // created above into hbase:meta - setupMeta(rootdir, regions); + setupMeta(regions); try { LOG.info("Starting mini zk cluster"); UTIL.startMiniZKCluster(); @@ -137,11 +128,10 @@ public class TestMergeTable { } private HRegion createRegion(final HTableDescriptor desc, - byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir) + byte [] startKey, byte [] endKey, int firstRow, int nrows) throws IOException { HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey); - HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootdir, UTIL.getConfiguration(), - desc); + HRegion region = UTIL.createRegionAndWAL(hri, desc); LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString()); for(int i = firstRow; i < firstRow + nrows; i++) { Put put = new Put(Bytes.toBytes("row_" + 
String.format("%1$05d", i))); @@ -157,16 +147,13 @@ public class TestMergeTable { return region; } - protected void setupMeta(Path rootdir, final HRegion [] regions) - throws IOException { - HRegion meta = - HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, rootdir, - UTIL.getConfiguration(), UTIL.getMetaTableDescriptor()); - for (HRegion r: regions) { + protected void setupMeta(final HRegion[] regions) throws IOException { + HRegion meta = UTIL.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, + UTIL.getMetaTableDescriptor()); + for (HRegion r : regions) { HRegion.addRegionToMETA(meta, r); } HBaseTestingUtility.closeRegionAndWAL(meta); } - } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java index 1924c9e..22a197d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java @@ -154,9 +154,7 @@ public class TestMergeTool extends HBaseTestCase { * Create the regions we will merge */ for (int i = 0; i < sourceRegions.length; i++) { - regions[i] = - HBaseTestingUtility.createRegionAndWAL(this.sourceRegions[i], testDir, this.conf, - this.desc); + regions[i] = TEST_UTIL.createRegionAndWAL(sourceRegions[i], desc, conf, testDir); /* * Insert data */ -- 2.3.2 (Apple Git-55)