diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
index f460bdb..eddf8f1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SecureBulkLoadClient.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRe
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.token.Token;

@@ -125,8 +124,4 @@ public class SecureBulkLoadClient {
       throw ProtobufUtil.handleRemoteException(se);
     }
   }
-
-  public Path getStagingPath(String bulkToken, byte[] family) throws IOException {
-    return SecureBulkLoadUtil.getStagingPath(table.getConfiguration(), bulkToken, family);
-  }
 }
\ No newline at end of file
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
deleted file mode 100644
index 5af6891..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.security;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.util.Bytes;
-
-@InterfaceAudience.Private
-public class SecureBulkLoadUtil {
-  private final static String BULKLOAD_STAGING_DIR = "hbase.bulkload.staging.dir";
-
-  /**
-   * This returns the staging path for a given column family.
-   * This is needed for clean recovery and called reflectively in LoadIncrementalHFiles
-   */
-  public static Path getStagingPath(Configuration conf, String bulkToken, byte[] family) {
-    Path stageP = new Path(getBaseStagingDir(conf), bulkToken);
-    return new Path(stageP, Bytes.toString(family));
-  }
-
-  public static Path getBaseStagingDir(Configuration conf) {
-    String hbaseTmpFsDir =
-        conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
-          HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
-    return new Path(conf.get(BULKLOAD_STAGING_DIR, hbaseTmpFsDir));
-  }
-}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 4c499a2..b5131fc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -305,6 +305,9 @@ public final class HConstants {
   /** Like the previous, but for old logs that are about to be deleted */
   public static final String HREGION_OLDLOGDIR_NAME = "oldWALs";

+  /** Staging dir used by bulk load */
+  public static final String BULKLOAD_STAGING_DIR_NAME = "staging";
+
   public static final String CORRUPT_DIR_NAME = "corrupt";

   /** Used by HBCK to sideline backup data */
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index a791717..dd96941 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -70,13 +70,6 @@ possible configurations would overwhelm and obscure the important.
     </description>
   </property>
   <property>
-    <name>hbase.bulkload.staging.dir</name>
-    <value>${hbase.fs.tmp.dir}</value>
-    <description>A staging directory in default file system (HDFS)
-    for bulk loading.
-    </description>
-  </property>
-  <property>
     <name>hbase.cluster.distributed</name>
     <value>false</value>
     <description>The mode the cluster will be in. Possible values are
@@ -1190,7 +1183,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>hbase.rootdir.perms</name>
     <value>700</value>
-    <description>FS Permissions for the root directory in a secure (kerberos) setup.
+    <description>FS Permissions for the root data subdirectory in a secure (kerberos) setup.
     When master starts, it creates the rootdir with this permissions or sets the permissions
     if it does not match.</description>
   </property>
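Note: with SecureBulkLoadUtil gone, the staging root is no longer configurable; it is always <hbase.rootdir>/staging. Callers that relied on getBaseStagingDir()/getStagingPath() can derive the same paths directly. A minimal sketch of the replacement derivation (the class and method names here are illustrative, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.FSUtils;

    public final class StagingPaths {
      /** Replacement for SecureBulkLoadUtil.getBaseStagingDir(conf). */
      static Path baseStagingDir(Configuration conf) throws java.io.IOException {
        // hbase.bulkload.staging.dir is ignored now; the dir hangs off the rootdir.
        return new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
      }

      /** Replacement for SecureBulkLoadUtil.getStagingPath(conf, bulkToken, family). */
      static Path stagingPath(Configuration conf, String bulkToken, byte[] family)
          throws java.io.IOException {
        return new Path(new Path(baseStagingDir(conf), bulkToken), Bytes.toString(family));
      }
    }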
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
index 4e7c8d2..3cae4d2 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/HBaseCommonTestingUtility.java
@@ -118,7 +118,6 @@ public class HBaseCommonTestingUtility {
     if (deleteOnExit()) this.dataTestDir.deleteOnExit();

     createSubDir("hbase.local.dir", testPath, "hbase-local-dir");
-    createSubDir("hbase.bulkload.staging.dir", testPath, "staging");

     return testPath;
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
index 0ce7411..28c86e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.fs.HFileSystem;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -64,6 +65,16 @@ public class MasterFileSystem {
   // hbase temp directory used for table construction and deletion
   private final Path tempdir;

+  // permissions for rootDir; needs 'x' for all so the bulk load staging dir is reachable
+  private final FsPermission rootPerms = FsPermission.valueOf("-rwxr-xr-x");
+  // permissions for the files under rootDir that need secure protection
+  private final FsPermission secureRootFilePerms = FsPermission.valueOf("-rw-------");
+  // permissions for the directories under rootDir that need secure protection
+  private final FsPermission secureRootSubDirPerms;
+  // permissions for the bulk load staging directory under rootDir
+  private final FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");
+  private boolean isSecurityEnabled;
+
   private final MasterServices services;

   public MasterFileSystem(MasterServices services) throws IOException {
@@ -81,6 +92,8 @@ public class MasterFileSystem {
     FSUtils.setFsDefault(conf, new Path(this.fs.getUri()));
     // make sure the fs has the same conf
     fs.setConf(conf);
+    this.secureRootSubDirPerms = new FsPermission(conf.get("hbase.rootdir.perms", "700"));
+    this.isSecurityEnabled = "kerberos".equalsIgnoreCase(conf.get("hbase.security.authentication"));
     // setup the filesystem variable
     createInitialFileSystemLayout();
     HFileSystem.addLocationsOrderInterceptor(conf);
@@ -99,8 +112,26 @@
     // check if the root directory exists
     checkRootDir(this.rootdir, conf, this.fs);

-    // check if temp directory exists and clean it
+    // Check the directories under rootdir. Add new sub-directory checks here.
     checkTempDir(this.tempdir, conf, this.fs);
+    checkSubDir(new Path(this.rootdir, HConstants.BASE_NAMESPACE_DIR));
+    checkSubDir(new Path(this.rootdir, HConstants.HFILE_ARCHIVE_DIRECTORY));
+    checkSubDir(new Path(this.rootdir, HConstants.HREGION_LOGDIR_NAME));
+    checkSubDir(new Path(this.rootdir, HConstants.HREGION_OLDLOGDIR_NAME));
+    checkSubDir(new Path(this.rootdir, MasterProcedureConstants.MASTER_PROCEDURE_LOGDIR));
+    checkSubDir(new Path(this.rootdir, HConstants.CORRUPT_DIR_NAME));
+    checkSubDir(new Path(this.rootdir, HConstants.HBCK_SIDELINEDIR_NAME));
+    checkSubDir(new Path(this.rootdir, MobConstants.MOB_DIR_NAME));
+
+    checkStagingDir();
+
+    // Handle the last few special files and set the final rootdir permissions.
+    // rootdir needs 'x' for all so the bulk load staging dir stays reachable.
+    if (isSecurityEnabled) {
+      fs.setPermission(new Path(rootdir, HConstants.VERSION_FILE_NAME), secureRootFilePerms);
+      fs.setPermission(new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME), secureRootFilePerms);
+    }
+    fs.setPermission(this.rootdir, this.rootPerms);
   }

   public FileSystem getFileSystem() {
@@ -146,17 +177,10 @@
     // If FS is in safe mode wait till out of it.
     FSUtils.waitOnSafeMode(c, c.getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));

-    boolean isSecurityEnabled = "kerberos".equalsIgnoreCase(c.get("hbase.security.authentication"));
-    FsPermission rootDirPerms = new FsPermission(c.get("hbase.rootdir.perms", "700"));
-
     // Filesystem is good. Go ahead and check for hbase.rootdir.
     try {
       if (!fs.exists(rd)) {
-        if (isSecurityEnabled) {
-          fs.mkdirs(rd, rootDirPerms);
-        } else {
-          fs.mkdirs(rd);
-        }
+        fs.mkdirs(rd);
         // DFS leaves safe mode with 0 DNs when there are 0 blocks.
         // We used to handle this by checking the current DN count and waiting until
         // it is nonzero. With security, the check for datanode count doesn't work --
@@ -171,16 +195,6 @@
       if (!fs.isDirectory(rd)) {
         throw new IllegalArgumentException(rd.toString() + " is not a directory");
       }
-      if (isSecurityEnabled && !rootDirPerms.equals(fs.getFileStatus(rd).getPermission())) {
-        // check whether the permission match
-        LOG.warn("Found rootdir permissions NOT matching expected \"hbase.rootdir.perms\" for "
-            + "rootdir=" + rd.toString() + " permissions=" + fs.getFileStatus(rd).getPermission()
-            + " and \"hbase.rootdir.perms\" configured as "
-            + c.get("hbase.rootdir.perms", "700") + ". Automatically setting the permissions. You"
-            + " can change the permissions by setting \"hbase.rootdir.perms\" in hbase-site.xml "
-            + "and restarting the master");
-        fs.setPermission(rd, rootDirPerms);
-      }
       // as above
       FSUtils.checkVersion(fs, rd, true, c.getInt(HConstants.THREAD_WAKE_FREQUENCY,
         10 * 1000), c.getInt(HConstants.VERSION_FILE_WRITE_ATTEMPTS,
@@ -239,8 +253,67 @@
     }

     // Create the temp directory
-    if (!fs.mkdirs(tmpdir)) {
-      throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
+    if (isSecurityEnabled) {
+      if (!fs.mkdirs(tmpdir, secureRootSubDirPerms)) {
+        throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
+      }
+    } else {
+      if (!fs.mkdirs(tmpdir)) {
+        throw new IOException("HBase temp directory '" + tmpdir + "' creation failure.");
+      }
     }
   }

+  /**
+   * Make sure the directories under rootDir have good permissions. Create if necessary.
+   * @param p the directory to check
+   * @throws IOException
+   */
+  private void checkSubDir(final Path p) throws IOException {
+    if (!fs.exists(p)) {
+      if (isSecurityEnabled) {
+        if (!fs.mkdirs(p, secureRootSubDirPerms)) {
+          throw new IOException("HBase directory '" + p + "' creation failure.");
+        }
+      } else {
+        if (!fs.mkdirs(p)) {
+          throw new IOException("HBase directory '" + p + "' creation failure.");
+        }
+      }
+    } else {
+      if (isSecurityEnabled && !secureRootSubDirPerms.equals(fs.getFileStatus(p).getPermission())) {
+        // check whether the permissions match
+        LOG.warn("Found HBase directory permissions NOT matching expected \"hbase.rootdir.perms\" for "
+            + p.toString() + " permissions=" + fs.getFileStatus(p).getPermission()
+            + " and \"hbase.rootdir.perms\" configured as "
+            + this.conf.get("hbase.rootdir.perms", "700") + ". Automatically setting the permissions. You"
+            + " can change the permissions by setting \"hbase.rootdir.perms\" in hbase-site.xml "
+            + "and restarting the master");
+        fs.setPermission(p, secureRootSubDirPerms);
+      }
+    }
+  }
+
+  /**
+   * Check permissions for the bulk load staging directory. This directory has special hidden
+   * permissions. Create it if necessary.
+   * @throws IOException
+   */
+  private void checkStagingDir() throws IOException {
+    Path p = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
+    try {
+      if (!this.fs.exists(p)) {
+        if (!this.fs.mkdirs(p, PERM_HIDDEN)) {
+          throw new IOException("Failed to create staging directory " + p.toString());
+        }
+      } else {
+        this.fs.setPermission(p, PERM_HIDDEN);
+      }
+    } catch (IOException e) {
+      LOG.error("Failed to create or set permission on staging directory " + p.toString());
+      throw new IOException("Failed to create or set permission on staging directory "
+          + p.toString(), e);
+    }
+  }
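Note: the permission scheme that createInitialFileSystemLayout() now enforces boils down to three modes. A small sketch of the intent, assuming a kerberos-secured cluster (the class name below is illustrative):

    import org.apache.hadoop.fs.permission.FsPermission;

    public class RootDirModes {
      public static void main(String[] args) {
        // rootdir: 755. Everyone gets 'x' so a bulk load client can traverse
        // down to staging/, but only the hbase user can list or modify it.
        FsPermission root = FsPermission.valueOf("-rwxr-xr-x");
        // secured sub-dirs (data/, WALs/, archive/, corrupt/, ...): hbase.rootdir.perms, default 700.
        FsPermission subDir = new FsPermission("700");
        // staging/: 711. Others can only traverse into a token dir whose random
        // name they were handed; they cannot list sibling loads.
        FsPermission staging = FsPermission.valueOf("-rwx--x--x");
        System.out.println(root + " / " + subDir + " / " + staging);
      }
    }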
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index ab0dd4f..6941d81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -105,11 +105,6 @@ public class MasterWalManager {
     this.distributedLogReplay = this.splitLogManager.isLogReplaying();

     this.oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-
-    // Make sure the region servers can archive their old logs
-    if (!this.fs.exists(oldLogDir)) {
-      this.fs.mkdirs(oldLogDir);
-    }
   }

   public void stop() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
index 9f53ac5..fabbe7a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadManager.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.coprocessor.BulkLoadObserver;
@@ -38,12 +39,12 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequ
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
 import org.apache.hadoop.hbase.regionserver.Region.BulkLoadListener;
-import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSHDFSUtils;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Methods;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.Text;
@@ -99,6 +100,8 @@ public class SecureBulkLoadManager {
   private final static FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx");
   private final static FsPermission PERM_HIDDEN = FsPermission.valueOf("-rwx--x--x");

+  private final static String BULKLOAD_STAGING_DIR = "hbase.bulkload.staging.dir";
+
   private SecureRandom random;
   private FileSystem fs;
   private Configuration conf;
@@ -113,32 +116,14 @@ public class SecureBulkLoadManager {
     this.conf = conf;
   }

-  public void start() {
+  public void start() throws IOException {
     random = new SecureRandom();
-    baseStagingDir = SecureBulkLoadUtil.getBaseStagingDir(conf);
-    this.userProvider = UserProvider.instantiate(conf);
-
-    try {
-      fs = FileSystem.get(conf);
-      fs.mkdirs(baseStagingDir, PERM_HIDDEN);
-      fs.setPermission(baseStagingDir, PERM_HIDDEN);
-      FileStatus status = fs.getFileStatus(baseStagingDir);
-      //no sticky bit in hadoop-1.0, making directory nonempty so it never gets erased
-      fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
-      if (status == null) {
-        throw new IllegalStateException("Failed to create staging directory "
-            + baseStagingDir.toString());
-      }
-      if (!status.getPermission().equals(PERM_HIDDEN)) {
-        throw new IllegalStateException(
-            "Staging directory already exists but permissions aren't set to '-rwx--x--x' "
-                + baseStagingDir.toString());
-      }
-    } catch (IOException e) {
-      LOG.error("Failed to create or set permission on staging directory "
-          + baseStagingDir.toString(), e);
-      throw new IllegalStateException("Failed to create or set permission on staging directory "
-          + baseStagingDir.toString(), e);
+    userProvider = UserProvider.instantiate(conf);
+    fs = FileSystem.get(conf);
+    baseStagingDir = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
+
+    if (conf.get(BULKLOAD_STAGING_DIR) != null) {
+      LOG.warn(BULKLOAD_STAGING_DIR + " is deprecated. Bulk load staging directory is "
+          + baseStagingDir);
     }
   }
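Note: start() no longer creates or validates the staging dir (the master's checkStagingDir() owns that now), and the DONOTERASE placeholder is gone. Per-load directories still hang off baseStagingDir via a random bulk token. A rough sketch of the resulting layout; the token, table, and family names are purely illustrative:

    import org.apache.hadoop.fs.Path;

    public class StagingLayout {
      public static void main(String[] args) {
        Path baseStagingDir = new Path("hdfs://nn/hbase/staging"); // <hbase.rootdir>/staging
        String bulkToken = "bob__mytable__a1b2c3";                 // illustrative random token
        Path familyDir = new Path(new Path(baseStagingDir, bulkToken), "cf");
        System.out.println(familyDir);
        // hdfs://nn/hbase/staging/bob__mytable__a1b2c3/cf
      }
    }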
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
index 9893e7e..cc02fb6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HFileReplicator.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.FsDelegationToken;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;

 /**
@@ -82,7 +83,7 @@ public class HFileReplicator {
   private UserProvider userProvider;
   private Configuration conf;
   private Connection connection;
-  private String hbaseStagingDir;
+  private Path hbaseStagingDir;
   private ThreadPoolExecutor exec;
   private int maxCopyThreads;
   private int copiesPerThread;
@@ -100,7 +101,7 @@ public class HFileReplicator {
     userProvider = UserProvider.instantiate(conf);
     fsDelegationToken = new FsDelegationToken(userProvider, "renewer");

-    this.hbaseStagingDir = conf.get("hbase.bulkload.staging.dir");
+    this.hbaseStagingDir = new Path(FSUtils.getRootDir(conf), HConstants.BULKLOAD_STAGING_DIR_NAME);
     this.maxCopyThreads =
         this.conf.getInt(REPLICATION_BULKLOAD_COPY_MAXTHREADS_KEY,
           REPLICATION_BULKLOAD_COPY_MAXTHREADS_DEFAULT);
@@ -253,7 +254,7 @@ public class HFileReplicator {
       // Create staging directory for each table
       Path stagingDir =
-          createStagingDir(new Path(hbaseStagingDir), user, TableName.valueOf(tableName));
+          createStagingDir(hbaseStagingDir, user, TableName.valueOf(tableName));

       familyHFilePathsPairsList = tableEntry.getValue();
       familyHFilePathsPairsListSize = familyHFilePathsPairsList.size();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index cc10fad..f7b2c51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -141,9 +141,11 @@ public class HFileCorruptionChecker {
     Path regionDir = cfDir.getParent();
     Path tableDir = regionDir.getParent();

-    // build up the corrupted dirs strcture
-    Path corruptBaseDir = new Path(FSUtils.getRootDir(conf), conf.get(
-        "hbase.hfile.quarantine.dir", HConstants.CORRUPT_DIR_NAME));
+    // build up the corrupted dirs structure
+    Path corruptBaseDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
+    if (conf.get("hbase.hfile.quarantine.dir") != null) {
+      LOG.warn("hbase.hfile.quarantine.dir is deprecated. Defaulting to " + corruptBaseDir);
+    }
     Path corruptTableDir = new Path(corruptBaseDir, tableDir.getName());
     Path corruptRegionDir = new Path(corruptTableDir, regionDir.getName());
     Path corruptFamilyDir = new Path(corruptRegionDir, cfDir.getName());
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 3e27834..b61d84c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -477,13 +477,11 @@ public class WALSplitter {
       final List<Path> corruptedLogs, final List<Path> processedLogs, final Path oldLogDir,
       final FileSystem fs, final Configuration conf) throws IOException {
-    final Path corruptDir = new Path(FSUtils.getRootDir(conf), conf.get(
-        "hbase.regionserver.hlog.splitlog.corrupt.dir", HConstants.CORRUPT_DIR_NAME));
-
-    if (!fs.mkdirs(corruptDir)) {
-      LOG.info("Unable to mkdir " + corruptDir);
+    final Path corruptDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
+    if (conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir") != null) {
+      LOG.warn("hbase.regionserver.hlog.splitlog.corrupt.dir is deprecated. Defaulting to "
+          + corruptDir);
     }
-    fs.mkdirs(oldLogDir);

     // this method can get restarted or called multiple times for archiving
     // the same log files.
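Note: both quarantine sites above follow the same shape: the directory is pinned to <rootdir>/corrupt and a still-set legacy key only earns a warning. A generic sketch of that pattern (the helper itself is illustrative, not in the patch):

    import org.apache.commons.logging.Log;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;

    final class FixedDirs {
      /** Resolve a formerly configurable dir now pinned under rootDir, warning on stale config. */
      static Path fixedSubDir(Configuration conf, Path rootDir, String dirName,
          String deprecatedKey, Log log) {
        Path dir = new Path(rootDir, dirName);
        if (conf.get(deprecatedKey) != null) {
          log.warn(deprecatedKey + " is deprecated. Defaulting to " + dir);
        }
        return dir;
      }
    }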
Default to " + corruptBaseDir); + } Path corruptTableDir = new Path(corruptBaseDir, tableDir.getName()); Path corruptRegionDir = new Path(corruptTableDir, regionDir.getName()); Path corruptFamilyDir = new Path(corruptRegionDir, cfDir.getName()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java index 3e27834..b61d84c 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java @@ -477,13 +477,11 @@ public class WALSplitter { final List corruptedLogs, final List processedLogs, final Path oldLogDir, final FileSystem fs, final Configuration conf) throws IOException { - final Path corruptDir = new Path(FSUtils.getRootDir(conf), conf.get( - "hbase.regionserver.hlog.splitlog.corrupt.dir", HConstants.CORRUPT_DIR_NAME)); - - if (!fs.mkdirs(corruptDir)) { - LOG.info("Unable to mkdir " + corruptDir); + final Path corruptDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME); + if (conf.get("hbase.regionserver.hlog.splitlog.corrupt.dir") != null) { + LOG.warn("hbase.regionserver.hlog.splitlog.corrupt.dir is deprecated. Default to " + + corruptDir); } - fs.mkdirs(oldLogDir); // this method can get restarted or called multiple times for archiving // the same log files. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java index c164091..c480568 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java @@ -1277,7 +1277,6 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility { } else { LOG.info("The hbase.fs.tmp.dir is set to " + hbaseFsTmpDirInString); } - this.conf.set("hbase.bulkload.staging.dir", this.conf.get("hbase.fs.tmp.dir")); } /** diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java index 9f2596c..b8c6d8f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFiles.java @@ -47,10 +47,10 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.regionserver.BloomType; -import org.apache.hadoop.hbase.security.SecureBulkLoadUtil; import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.testclassification.MapReduceTests; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.HFileTestUtil; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -302,7 +302,7 @@ public class TestLoadIncrementalHFiles { } // verify staging folder has been cleaned up - Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration()); + Path stagingBasePath = new Path(FSUtils.getRootDir(util.getConfiguration()), HConstants.BULKLOAD_STAGING_DIR_NAME); if(fs.exists(stagingBasePath)) { FileStatus[] files = fs.listStatus(stagingBasePath); for(FileStatus file : files) { diff --git 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
index 9ecc5d6..d195347 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.DelegationToken;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.PrepareBulkLoadResponse;
 import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos;
-import org.apache.hadoop.hbase.security.SecureBulkLoadUtil;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.security.token.Token;
@@ -166,7 +165,4 @@ public class SecureBulkLoadEndpointClient {
     }
   }

-  public Path getStagingPath(String bulkToken, byte[] family) throws IOException {
-    return SecureBulkLoadUtil.getStagingPath(table.getConfiguration(), bulkToken, family);
-  }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
index 3136416..251ec9e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALSplit.java
@@ -1076,8 +1076,7 @@ public class TestWALSplit {
     useDifferentDFSClient();
     WALSplitter.split(HBASEDIR, WALDIR, OLDLOGDIR, fs, conf, wals);

-    final Path corruptDir = new Path(FSUtils.getRootDir(conf), conf.get(
-        "hbase.regionserver.hlog.splitlog.corrupt.dir", HConstants.CORRUPT_DIR_NAME));
+    final Path corruptDir = new Path(FSUtils.getRootDir(conf), HConstants.CORRUPT_DIR_NAME);
     assertEquals(1, fs.listStatus(corruptDir).length);
   }
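Note: to sanity-check the new layout on a running cluster, something along these lines would confirm the modes the master is expected to set (the /hbase rootdir is an assumption; read hbase.rootdir from your site config in practice):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class VerifyLayout {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // expects core/hdfs-site.xml on the classpath
        FileSystem fs = FileSystem.get(conf);
        Path root = new Path("/hbase");           // assumed hbase.rootdir
        check(fs, root, FsPermission.valueOf("-rwxr-xr-x"));
        check(fs, new Path(root, "staging"), FsPermission.valueOf("-rwx--x--x"));
        check(fs, new Path(root, "data"), new FsPermission("700")); // secure setups only
      }

      static void check(FileSystem fs, Path p, FsPermission want) throws Exception {
        FsPermission got = fs.getFileStatus(p).getPermission();
        System.out.println(p + ": " + got + (got.equals(want) ? " (ok)" : ", expected " + want));
      }
    }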