diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index ed2a453..e8a27e2 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -137,12 +137,16 @@ public class HBaseTestingUtility {
 
   private String hadoopLogDir;
 
-  // Directory where we put the data for this instance of HBaseTestingUtility.
+  /** Directory where we put the data for this instance of HBaseTestingUtility */
   private File dataTestDir = null;
 
-  // Directory (a subdirectory of dataTestDir) used by the dfs cluster if any
+  /** Directory (a subdirectory of dataTestDir) used by the dfs cluster if any */
   private File clusterTestDir = null;
 
+  /** Directory on test filesystem where we put the data for this instance of
+   * HBaseTestingUtility */
+  private Path dataTestDirOnTestFS = null;
+
   /**
    * System property key to get test directory value.
    * Name is as it is because mini dfs has hard-codings to put test data here.
@@ -252,6 +256,17 @@ public class HBaseTestingUtility {
   }
 
   /**
+   * @return Where to write test data on the test filesystem; returns the working
+   * directory of the test filesystem by default
+   * @see #setupDataTestDirOnTestFS()
+   * @see #getTestFileSystem()
+   */
+  private Path getBaseTestDirOnTestFS() throws IOException {
+    FileSystem fs = getTestFileSystem();
+    return new Path(fs.getWorkingDirectory(), "test-data");
+  }
+
+  /**
    * @return Where to write test data on local filesystem, specific to
    * the test. Useful for tests that do not use a cluster.
    * Creates it if it does not exist already.
@@ -305,6 +320,31 @@ public class HBaseTestingUtility {
   }
 
   /**
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
+   * to write temporary test data. Call this method after setting up the mini dfs cluster
+   * if the test relies on it.
+   * @return a unique path in the test filesystem
+   */
+  public Path getDataTestDirOnTestFS() throws IOException {
+    if (dataTestDirOnTestFS == null) {
+      setupDataTestDirOnTestFS();
+    }
+
+    return dataTestDirOnTestFS;
+  }
+
+  /**
+   * Returns a Path in the test filesystem, obtained from {@link #getTestFileSystem()},
+   * to write temporary test data. Call this method after setting up the mini dfs cluster
+   * if the test relies on it.
+   * @param subdirName name of the subdir to create under the base test dir
+   * @return a unique path in the test filesystem
+   */
+  public Path getDataTestDirOnTestFS(final String subdirName) throws IOException {
+    return new Path(getDataTestDirOnTestFS(), subdirName);
+  }
+
+  /**
    * Home our data in a dir under {@link #DEFAULT_BASE_TEST_DIRECTORY}.
    * Give it a random name so can have many concurrent tests running if
    * we need to.  It needs to amend the {@link #TEST_DIRECTORY_KEY}
@@ -347,10 +387,6 @@ public class HBaseTestingUtility {
     createSubDir(
       "mapred.local.dir",
       testPath, "mapred-local-dir");
-
-    createSubDirAndSystemProperty(
-      "mapred.working.dir",
-      testPath, "mapred-working-dir");
   }
 
   private void createSubDir(String propertyName, Path parent, String subDirName){
@@ -386,6 +422,34 @@ public class HBaseTestingUtility {
     }
   }
+
+  /**
+   * Sets up a path in test filesystem to be used by tests
+   */
+  private void setupDataTestDirOnTestFS() throws IOException {
+    if (dataTestDirOnTestFS != null) {
+      LOG.warn("Data test on test fs dir already setup in "
+          + dataTestDirOnTestFS.toString());
+      return;
+    }
+
+    //The file system can be either local, mini dfs, or if the configuration
+    //is supplied externally, it can be an external cluster FS. If it is a local
+    //file system, the tests should use getBaseTestDir, otherwise, we can use
+    //the working directory, and create a unique sub dir there
+    FileSystem fs = getTestFileSystem();
+    if (fs.getUri().getScheme().equals(fs.getLocal(conf).getUri().getScheme())) {
+      if (dataTestDir == null) {
+        setupDataTestDir();
+      }
+      dataTestDirOnTestFS = new Path(dataTestDir.getAbsolutePath());
+    } else {
+      Path base = getBaseTestDirOnTestFS();
+      String randomStr = UUID.randomUUID().toString();
+      dataTestDirOnTestFS = new Path(base, randomStr);
+      fs.deleteOnExit(dataTestDirOnTestFS);
+    }
+  }
 
   /**
    * Start a minidfscluster.
    * @param servers How many DNs to start.
@@ -441,6 +505,9 @@ public class HBaseTestingUtility {
     // Wait for the cluster to be totally up
     this.dfsCluster.waitClusterUp();
 
+    //reset the test directory for test file system
+    dataTestDirOnTestFS = null;
+
     return this.dfsCluster;
   }
 
@@ -460,6 +527,9 @@ public class HBaseTestingUtility {
     // Wait for the cluster to be totally up
     this.dfsCluster.waitClusterUp();
 
+    //reset the test directory for test file system
+    dataTestDirOnTestFS = null;
+
     return this.dfsCluster;
   }
 
@@ -471,18 +541,23 @@ public class HBaseTestingUtility {
   }
 
   /** This is used before starting HDFS and map-reduce mini-clusters */
-  private void createDirsAndSetProperties() {
+  private void createDirsAndSetProperties() throws IOException {
     setupClusterTestDir();
     System.setProperty(TEST_DIRECTORY_KEY, clusterTestDir.getPath());
     createDirAndSetProperty("cache_data", "test.cache.data");
     createDirAndSetProperty("hadoop_tmp", "hadoop.tmp.dir");
     hadoopLogDir = createDirAndSetProperty("hadoop_logs", "hadoop.log.dir");
-    createDirAndSetProperty("mapred_output", MapreduceTestingShim.getMROutputDirProp());
     createDirAndSetProperty("mapred_local", "mapred.local.dir");
-    createDirAndSetProperty("mapred_system", "mapred.system.dir");
     createDirAndSetProperty("mapred_temp", "mapred.temp.dir");
     enableShortCircuit();
+
+    Path root = getDataTestDirOnTestFS("hadoop");
+    conf.set(MapreduceTestingShim.getMROutputDirProp(),
+      new Path(root, "mapred-output-dir").toString());
+    conf.set("mapred.system.dir", new Path(root, "mapred-system-dir").toString());
+    conf.set("mapreduce.jobtracker.staging.root.dir",
+      new Path(root, "mapreduce-jobtracker-staging-root-dir").toString());
+    conf.set("mapred.working.dir", new Path(root, "mapred-working-dir").toString());
   }
 
@@ -520,7 +595,7 @@ public class HBaseTestingUtility {
 
   private String createDirAndSetProperty(final String relPath, String property) {
-    String path = clusterTestDir.getPath() + "/" + relPath;
+    String path = getDataTestDir(relPath).toString();
     System.setProperty(property, path);
     conf.set(property, path);
     new File(path).mkdirs();
     return path;
@@ -538,6 +613,9 @@ public class HBaseTestingUtility {
       // The below throws an exception per dn, AsynchronousCloseException.
       this.dfsCluster.shutdown();
       dfsCluster = null;
+      dataTestDirOnTestFS = null;
+      this.conf.set("fs.defaultFS", "file:///");
+      this.conf.set("fs.default.name", "file:///");
     }
   }
 
@@ -1473,7 +1551,8 @@ public class HBaseTestingUtility {
 
     // Allow the user to override FS URI for this map-reduce cluster to use.
     mrCluster = new MiniMRCluster(servers,
-      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1);
+      FS_URI != null ? FS_URI : FileSystem.get(conf).getUri().toString(), 1,
+      null, null, new JobConf(this.conf));
     JobConf jobConf = MapreduceTestingShim.getJobConf(mrCluster);
     if (jobConf == null) {
       jobConf = mrCluster.createJobConf();
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
index 6951095..8ceb712 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
@@ -465,7 +465,7 @@ public class TestRegionObserverInterface {
     );
 
     FileSystem fs = util.getTestFileSystem();
-    final Path dir = util.getDataTestDir(testName).makeQualified(fs);
+    final Path dir = util.getDataTestDirOnTestFS(testName).makeQualified(fs);
     Path familyDir = new Path(dir, Bytes.toString(A));
 
     createHFile(util.getConfiguration(), fs, new Path(familyDir,Bytes.toString(A)), A, A);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
index a432662..e7eeec3 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
@@ -288,7 +288,7 @@ public class TestHFileOutputFormat {
   @Test
   public void testWritingPEData() throws Exception {
     Configuration conf = util.getConfiguration();
-    Path testDir = util.getDataTestDir("testWritingPEData");
+    Path testDir = util.getDataTestDirOnTestFS("testWritingPEData");
     FileSystem fs = testDir.getFileSystem(conf);
 
     // Set down this value or we OOME in eclipse.
@@ -357,11 +357,11 @@ public class TestHFileOutputFormat {
       boolean shouldChangeRegions) throws Exception {
     util = new HBaseTestingUtility();
     Configuration conf = util.getConfiguration();
-    Path testDir = util.getDataTestDir("testLocalMRIncrementalLoad");
     byte[][] startKeys = generateRandomStartKeys(5);
 
     try {
       util.startMiniCluster();
+      Path testDir = util.getDataTestDirOnTestFS("testLocalMRIncrementalLoad");
       HBaseAdmin admin = new HBaseAdmin(conf);
       HTable table = util.createTable(TABLE_NAME, FAMILIES);
       assertEquals("Should start with empty table",
@@ -449,7 +449,7 @@ public class TestHFileOutputFormat {
       Configuration conf, HTable table, Path outDir)
       throws Exception {
     Job job = new Job(conf, "testLocalMRIncrementalLoad");
-    job.setWorkingDirectory(util.getDataTestDir("runIncrementalPELoad"));
+    job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
     setupRandomGeneratorMapper(job);
     HFileOutputFormat.configureIncrementalLoad(job, table);
     FileOutputFormat.setOutputPath(job, outDir);
@@ -546,7 +546,7 @@ public class TestHFileOutputFormat {
     RecordWriter<ImmutableBytesWritable, KeyValue> writer = null;
     TaskAttemptContext context = null;
     Path dir =
-      util.getDataTestDir("testColumnFamilyCompression");
+      util.getDataTestDirOnTestFS("testColumnFamilyCompression");
 
     HTable table = Mockito.mock(HTable.class);
@@ -570,7 +570,7 @@ public class TestHFileOutputFormat {
       // pollutes the GZip codec pool with an incompatible compressor.
       conf.set("io.seqfile.compression.type", "NONE");
       Job job = new Job(conf, "testLocalMRIncrementalLoad");
-      job.setWorkingDirectory(util.getDataTestDir("testColumnFamilyCompression"));
+      job.setWorkingDirectory(util.getDataTestDirOnTestFS("testColumnFamilyCompression"));
       setupRandomGeneratorMapper(job);
       HFileOutputFormat.configureIncrementalLoad(job, table);
       FileOutputFormat.setOutputPath(job, dir);
@@ -703,7 +703,7 @@ public class TestHFileOutputFormat {
     util.startMiniMapReduceCluster();
 
     for (int i = 0; i < 2; i++) {
-      Path testDir = util.getDataTestDir("testExcludeAllFromMinorCompaction_" + i);
+      Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i);
       runIncrementalPELoad(conf, table, testDir);
       // Perform the actual load
       new LoadIncrementalHFiles(conf).doBulkLoad(testDir, table);
@@ -748,11 +748,11 @@ public class TestHFileOutputFormat {
   public void testExcludeMinorCompaction() throws Exception {
     Configuration conf = util.getConfiguration();
     conf.setInt("hbase.hstore.compaction.min", 2);
-    Path testDir = util.getDataTestDir("testExcludeMinorCompaction");
     generateRandomStartKeys(5);
 
     try {
       util.startMiniCluster();
+      Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction");
       final FileSystem fs = util.getDFSCluster().getFileSystem();
       HBaseAdmin admin = new HBaseAdmin(conf);
       HTable table = util.createTable(TABLE_NAME, FAMILIES);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
index c38e37d..a33071c 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java
@@ -126,7 +126,7 @@ public class TestLoadIncrementalHFiles {
   private void runTest(String testName, BloomType bloomType,
       byte[][][] hfileRanges) throws Exception {
-    Path dir = util.getDataTestDir(testName);
+    Path dir = util.getDataTestDirOnTestFS(testName);
     FileSystem fs = util.getTestFileSystem();
     dir = dir.makeQualified(fs);
     Path familyDir = new Path(dir, Bytes.toString(FAMILY));
@@ -209,7 +209,7 @@ public class TestLoadIncrementalHFiles {
 
   @Test
   public void testSplitStoreFile() throws IOException {
-    Path dir = util.getDataTestDir("testSplitHFile");
+    Path dir = util.getDataTestDirOnTestFS("testSplitHFile");
     FileSystem fs = util.getTestFileSystem();
     Path testIn = new Path(dir, "testhfile");
     HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
index 7053b58..f3d76e2 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
@@ -126,7 +126,7 @@ public class TestLoadIncrementalHFilesSplitRecovery {
   }
 
   private Path buildBulkFiles(String table, int value) throws Exception {
-    Path dir = util.getDataTestDir(table);
+    Path dir = util.getDataTestDirOnTestFS(table);
     Path bulk1 = new Path(dir, table+value);
     FileSystem fs = util.getTestFileSystem();
     buildHFiles(fs, bulk1, value);
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index 10c0475..6b821de 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -63,7 +63,7 @@ public class TestHFileCleaner {
   @Test
   public void testTTLCleaner() throws IOException, InterruptedException {
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
-    Path root = UTIL.getDataTestDir();
+    Path root = UTIL.getDataTestDirOnTestFS();
     Path file = new Path(root, "file");
     fs.createNewFile(file);
     long createTime = System.currentTimeMillis();
@@ -97,7 +97,7 @@ public class TestHFileCleaner {
     long ttl = 2000;
     conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
     Server server = new DummyServer();
-    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+    Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
     FileSystem fs = FileSystem.get(conf);
     HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archivedHfileDir);
@@ -163,7 +163,7 @@ public class TestHFileCleaner {
     // no cleaner policies = delete all files
     conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, "");
     Server server = new DummyServer();
-    Path archivedHfileDir = new Path(UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);
+    Path archivedHfileDir = new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
 
     // setup the cleaner
     FileSystem fs = UTIL.getDFSCluster().getFileSystem();
@@ -234,4 +234,4 @@ public class TestHFileCleaner {
       return false;
     }
   }
-}
\ No newline at end of file
+}
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
index 8656dd2..d7f5ce9 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoad.java
@@ -106,7 +106,7 @@ public class TestHRegionServerBulkLoad {
   /**
    * Thread that does full scans of the table looking for any partially
    * completed rows.
-   * 
+   *
    * Each iteration of this loads 10 hdfs files, which occupies 5 file open file
    * handles. So every 10 iterations (500 file handles) it does a region
    * compaction to reduce the number of open file handles.
@@ -124,7 +124,7 @@ public class TestHRegionServerBulkLoad {
 
     public void doAnAction() throws Exception {
       long iteration = numBulkLoads.getAndIncrement();
-      Path dir = UTIL.getDataTestDir(String.format("bulkLoad_%08d",
+      Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d",
           iteration));
 
       // create HFiles for different column families
diff --git hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
index ce502c4..265f777 100644
--- hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
+++ hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLog.java
@@ -403,7 +403,7 @@ public class TestHLog {
     try {
       DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
       dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
-      cluster.shutdown();
+      TEST_UTIL.shutdownMiniDFSCluster();
      try {
        // wal.writer.close() will throw an exception,
        // but still call this since it closes the LogSyncer thread first
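
Usage sketch (a hypothetical test class, not part of this patch): the point of the new
getDataTestDirOnTestFS() is that it resolves against whatever getTestFileSystem() returns,
so once a mini DFS cluster is running the returned Path lives in HDFS, whereas
getDataTestDir() always points at the local disk. Class and test names below are invented
for illustration.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.Test;

public class ExampleDataTestDirOnTestFS {
  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

  @Test
  public void writesToTheTestFileSystem() throws Exception {
    UTIL.startMiniDFSCluster(1);                 // test filesystem is now HDFS, not file://
    try {
      // Unlike getDataTestDir(), this path lives on the same filesystem that
      // getTestFileSystem() returns, under a randomly named subdirectory.
      Path dir = UTIL.getDataTestDirOnTestFS("example");
      FileSystem fs = UTIL.getTestFileSystem();
      fs.mkdirs(dir);
      // Lands in HDFS; the patch registers deleteOnExit, so it is removed
      // when the filesystem is closed.
      fs.createNewFile(new Path(dir, "probe"));
    } finally {
      UTIL.shutdownMiniDFSCluster();             // also resets dataTestDirOnTestFS to null
    }
  }
}

The MiniMRCluster change follows the same idea: constructing the mini MR cluster with
new JobConf(this.conf) makes it inherit the test configuration, so the mapred system,
staging, and working directories set in createDirsAndSetProperties() resolve on the test
filesystem instead of leaking onto the local disk.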