diff --git src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index e15eb47..dd8c295 100644
--- src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -121,13 +121,16 @@ public class HBaseTestingUtility {
   private MiniHBaseCluster hbaseCluster = null;
   private MiniMRCluster mrCluster = null;
 
-  // Directory where we put the data for this instance of HBaseTestingUtility
+  /** Directory where we put the data for this instance of HBaseTestingUtility */
   private File dataTestDir = null;
 
-  // Directory (usually a subdirectory of dataTestDir) used by the dfs cluster
-  // if any
+  /** Directory (usually a subdirectory of dataTestDir) used by the dfs cluster, if any */
   private File clusterTestDir = null;
 
+  /** Directory on the test filesystem where we put the data for this instance of
+   * HBaseTestingUtility */
+  private Path dataTestDirOnTestFS = null;
+
   /**
    * System property key to get test directory value.
    * Name is as it is because mini dfs has hard-codings to put test data here.
@@ -227,6 +230,17 @@ public class HBaseTestingUtility {
   }
 
   /**
+   * @return Where to write test data on the test filesystem; by default a "test-data"
+   * directory under the working directory of the test filesystem
+   * @see #setupDataTestDirOnTestFS()
+   * @see #getTestFileSystem()
+   */
+  private Path getBaseTestDirOnTestFS() throws IOException {
+    FileSystem fs = getTestFileSystem();
+    return new Path(fs.getWorkingDirectory(), "test-data");
+  }
+
+  /**
    * @return Where to write test data on local filesystem, specific to
    *  the test.  Useful for tests that do not use a cluster.
    * Creates it if it does not exist already.
@@ -262,6 +276,31 @@ public class HBaseTestingUtility {
   }
 
   /**
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
+   * to write temporary test data. Call this method after setting up the mini dfs cluster
+   * if the test relies on it.
+   * @return a unique path in the test filesystem
+   */
+  public Path getDataTestDirOnTestFS() throws IOException {
+    if (dataTestDirOnTestFS == null) {
+      setupDataTestDirOnTestFS();
+    }
+
+    return dataTestDirOnTestFS;
+  }
+
+  /**
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
+   * to write temporary test data. Call this method after setting up the mini dfs cluster
+   * if the test relies on it.
+   * @param subdirName name of the subdir to create under the base test dir
+   * @return a unique path in the test filesystem
+   */
+  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
+    return new Path(getDataTestDirOnTestFS(), subdirName);
+  }
+
+  /**
    * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
    * Give it a random name so can have many concurrent tests running if
    * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
@@ -304,10 +343,6 @@ public class HBaseTestingUtility {
     createSubDir(
       "mapred.local.dir",
       testPath, "mapred-local-dir");
-
-    createSubDirAndSystemProperty(
-      "mapred.working.dir",
-      testPath, "mapred-working-dir");
   }
 
   private void createSubDir(String propertyName, Path parent, String subDirName){
@@ -361,6 +396,34 @@ public class HBaseTestingUtility {
 
     clusterTestDir.deleteOnExit();
   }
+
+  /**
+   * Sets up a path in the test filesystem to be used by tests.
+   */
+  private void setupDataTestDirOnTestFS() throws IOException {
+    if (dataTestDirOnTestFS != null) {
+      LOG.warn("Data test dir on test fs already set up in "
+          + dataTestDirOnTestFS.toString());
+      return;
+    }
+
+    // The file system can be either local, mini dfs, or if the configuration
+    // is supplied externally, it can be an external cluster FS. If it is a local
+    // file system, the tests should use getBaseTestDir, otherwise, we can use
+    // the working directory, and create a unique sub dir there.
+    FileSystem fs = getTestFileSystem();
+    if (fs.getUri().getScheme().equals(FileSystem.getLocal(conf).getUri().getScheme())) {
+      if (dataTestDir == null) {
+        setupDataTestDir();
+      }
+      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
+    } else {
+      Path base = getBaseTestDirOnTestFS();
+      String randomStr = UUID.randomUUID().toString();
+      dataTestDirOnTestFS = new Path(base, randomStr);
+      fs.deleteOnExit(dataTestDirOnTestFS);
+    }
+  }
 
  /**
   * @throws IOException If a cluster -- zk, dfs, or hbase -- already running.
  */
@@ -421,6 +484,9 @@ public class HBaseTestingUtility {
       setupClusterTestDir();
     }
 
+    // Reset the test directory for the test filesystem.
+    dataTestDirOnTestFS = null;
+
     // We have to set this property as it is used by MiniCluster
     System.setProperty(TEST_DIRECTORY_KEY, this.clusterTestDir.toString());
 
@@ -455,6 +521,7 @@ public class HBaseTestingUtility {
       // The below throws an exception per dn, AsynchronousCloseException.
       this.dfsCluster.shutdown();
       dfsCluster = null;
+      dataTestDirOnTestFS = null;
     }
   }
 
@@ -1292,9 +1359,16 @@ public class HBaseTestingUtility {
       logDir = tmpDir;
     }
     System.setProperty("hadoop.log.dir", logDir);
-    c.set("mapred.output.dir", tmpDir);
+
+    Path root = getDataTestDirOnTestFS("hadoop");
+    c.set("mapred.output.dir", new Path(root, "mapred-output-dir").toString());
+    c.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
+    c.set("mapreduce.jobtracker.staging.root.dir",
+      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
+    c.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
+
     mrCluster = new MiniMRCluster(servers,
-      FileSystem.get(conf).getUri().toString(), 1);
+      FileSystem.get(conf).getUri().toString(), 1, null, null, new JobConf(this.conf));
     LOG.info("Mini mapreduce cluster started");
     JobConf mrClusterJobConf = mrCluster.createJobConf();
     c.set("mapred.job.tracker", mrClusterJobConf.get("mapred.job.tracker"));
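The HBaseTestingUtility changes above introduce the split between getDataTestDir (always on the local filesystem) and getDataTestDirOnTestFS (bound to whatever getTestFileSystem() returns). Because startMiniDFSCluster resets dataTestDirOnTestFS to null, a call made after the cluster is up always re-resolves against the dfs. A minimal sketch, not part of the patch, of how a test consumes the new accessor; the class name and test dir name are illustrative:

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class DataTestDirOnTestFSExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();   // mini dfs comes up; the test FS is now HDFS
        try {
          // Resolves to a unique, randomly named dir on the mini dfs,
          // not to the local dataTestDir.
          Path dir = util.getDataTestDirOnTestFS("exampleTest");
          FileSystem fs = util.getTestFileSystem();
          fs.mkdirs(dir);          // dir and fs refer to the same filesystem
          System.out.println("writing test data under " + dir);
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }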
diff --git src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index 36318f2..b59be7d 100644
--- src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -448,7 +448,7 @@ public class TestRegionObserverInterface {
     );
 
     FileSystem fs = util.getTestFileSystem();
-    final Path dir = util.getDataTestDir(testName).makeQualified(fs);
+    final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
     Path familyDir = new Path(dir, Bytes.toString(A));
 
     createHFile(util.getConfiguration(), fs, new Path(familyDir,Bytes.toString(A)), A, A);
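One detail worth noting in the hunk above: the makeQualified(fs) call is kept. getDataTestDirOnTestFS returns a path relative to the test filesystem's working directory, and qualifying it pins the scheme and authority so that code resolving the path against a different Configuration cannot silently land on another filesystem. A small illustration; the URI in the comment is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QualifiedPathExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path relative = new Path("test-data/someTest");
        // Unqualified, the scheme depends on whoever resolves the path later;
        // qualified, it is fixed, e.g. hdfs://localhost:8020/user/me/test-data/someTest
        Path qualified = relative.makeQualified(fs);
        System.out.println(qualified);
      }
    }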
diff --git src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
index c21cd05..e991355 100644
--- src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
+++ src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
@@ -33,17 +33,25 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.concurrent.Callable;
 import java.util.Random;
+import java.util.concurrent.Callable;
 
 import junit.framework.Assert;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.LargeTests;
+import org.apache.hadoop.hbase.PerformanceEvaluation;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
@@ -272,7 +280,7 @@ public class TestHFileOutputFormat {
     // verify that the file has the proper FileInfo.
     writer.close(context);
 
-    // the generated file lives 1 directory down from the attempt directory 
+    // the generated file lives 1 directory down from the attempt directory
     // and is the only file, e.g.
     // _attempt__0000_r_000000_0/b/1979617994050536795
     FileSystem fs = FileSystem.get(conf);
@@ -307,7 +315,7 @@ public class TestHFileOutputFormat {
   @Test
   public void testWritingPEData() throws Exception {
     Configuration conf = util.getConfiguration();
-    Path testDir = util.getDataTestDir("testWritingPEData");
+    Path testDir = util.getDataTestDirOnTestFS("testWritingPEData");
     FileSystem fs = testDir.getFileSystem(conf);
 
     // Set down this value or we OOME in eclipse.
@@ -372,11 +380,11 @@ public class TestHFileOutputFormat {
   private void doIncrementalLoadTest(
       boolean shouldChangeRegions) throws Exception {
     Configuration conf = util.getConfiguration();
-    Path testDir = util.getDataTestDir("testLocalMRIncrementalLoad");
     byte[][] startKeys = generateRandomStartKeys(5);
 
     try {
       util.startMiniCluster();
+      Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
       HBaseAdmin admin = new HBaseAdmin(conf);
       HTable table = util.createTable(TABLE_NAME, FAMILIES);
       assertEquals("Should start with empty table",
@@ -763,11 +771,11 @@ public class TestHFileOutputFormat {
   public void testExcludeMinorCompaction() throws Exception {
     Configuration conf = util.getConfiguration();
     conf.setInt("hbase.hstore.compaction.min", 2);
-    Path testDir = util.getDataTestDir("testExcludeMinorCompaction");
     generateRandomStartKeys(5);
 
     try {
       util.startMiniCluster();
+      Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
       final FileSystem fs = util.getDFSCluster().getFileSystem();
       HBaseAdmin admin = new HBaseAdmin(conf);
       HTable table = util.createTable(TABLE_NAME, FAMILIES);
diff --git src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
index d0f9ef7..23ff7dd 100644
--- src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
+++ src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
@@ -124,7 +124,7 @@ public class TestLoadIncrementalHFiles {
   private void runTest(String testName, BloomType bloomType,
       byte[][][] hfileRanges) throws Exception {
-    Path dir = util.getDataTestDir(testName);
+    Path dir = util.getDataTestDirOnTestFS(testName);
     FileSystem fs = util.getTestFileSystem();
     dir = dir.makeQualified(fs);
     Path familyDir = new Path(dir, Bytes.toString(FAMILY));
 
@@ -158,7 +158,7 @@ public class TestLoadIncrementalHFiles {
 
   @Test
   public void testSplitStoreFile() throws IOException {
-    Path dir = util.getDataTestDir("testSplitHFile");
+    Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
     FileSystem fs = util.getTestFileSystem();
     Path testIn = new Path(dir, "testhfile");
     HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
diff --git src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index 301ee27..a58cafd 100644
--- src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -123,7 +123,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   }
 
   private Path buildBulkFiles(String table, int value) throws Exception {
-    Path dir = util.getDataTestDir(table);
+    Path dir = util.getDataTestDirOnTestFS(table);
    Path bulk1 = new Path(dir, table+value);
    FileSystem fs = util.getTestFileSystem();
    buildHFiles(fs, bulk1, value);
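In both TestHFileOutputFormat hunks, the testDir assignment moves inside the try block, after util.startMiniCluster(). That ordering is the point of the change: getDataTestDirOnTestFS binds to whichever filesystem getTestFileSystem() returns at call time, so a Path obtained before the cluster is up would point at the local filesystem. A hedged sketch of the two orderings; the test dir name follows the test, everything else is illustrative:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class OrderingExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();

        // Wrong order (commented out): the returned Path would point at the
        // local filesystem, because no mini dfs is running yet.
        //   Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
        //   util.startMiniCluster();

        // Right order: start the cluster first, then resolve the dir.
        util.startMiniCluster();
        Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
        System.out.println("staging dir on test fs: " + testDir);
        util.shutdownMiniCluster();
      }
    }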
diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index a1bf73b..752b221 100644
--- src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -102,7 +102,7 @@ public class TestHRegionServerBulkLoad {
   /**
    * Thread that does full scans of the table looking for any partially
    * completed rows.
-   * 
+   *
    * Each iteration of this loads 10 hdfs files, which occupies 5 file open file
    * handles. So every 10 iterations (500 file handles) it does a region
    * compaction to reduce the number of open file handles.
@@ -120,7 +120,7 @@ public class TestHRegionServerBulkLoad {
 
     public void doAnAction() throws Exception {
       long iteration = numBulkLoads.getAndIncrement();
-      Path dir = UTIL.getDataTestDir(String.format("bulkLoad_%08d",
+      Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d",
           iteration));
 
       // create HFiles for different column families
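Finally, the bulk-load stress test above gives every doAnAction() iteration its own staging directory on the test filesystem, keyed off an atomic counter, so concurrent loader threads never stage HFiles into the same path. A condensed sketch of that pattern; the helper class is illustrative, while the counter and format string follow the test:

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicLong;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class BulkLoadDirs {
      static final AtomicLong numBulkLoads = new AtomicLong();

      // Returns a fresh, zero-padded directory per call, e.g. .../bulkLoad_00000007,
      // so directory listings stay sorted by iteration.
      static Path nextBulkLoadDir(HBaseTestingUtility util) throws IOException {
        long iteration = numBulkLoads.getAndIncrement();
        return util.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", iteration));
      }
    }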