diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java
index f1a3877..510c5e4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java
@@ -42,6 +42,7 @@ public final class Superusers {
 
   private static List<String> superUsers;
   private static List<String> superGroups;
+  private static User processUser;
 
   private Superusers(){}
 
@@ -55,17 +56,17 @@ public final class Superusers {
   public static void initialize(Configuration conf) throws IOException {
     superUsers = new ArrayList<>();
     superGroups = new ArrayList<>();
-    User user = User.getCurrent();
+    processUser = User.getCurrent();
 
-    if (user == null) {
+    if (processUser == null) {
       throw new IllegalStateException("Unable to obtain the current user, "
         + "authorization checks for internal operations will not work correctly!");
     }
 
     if (LOG.isTraceEnabled()) {
-      LOG.trace("Current user name is " + user.getShortName());
+      LOG.trace("Current user name is " + processUser.getShortName());
     }
-    String currentUser = user.getShortName();
+    String currentUser = processUser.getShortName();
     String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
     for (String name : superUserList) {
       if (AuthUtil.isGroupPrincipal(name)) {
@@ -104,4 +105,8 @@ public final class Superusers {
   public static List<String> getSuperUsers() {
     return superUsers;
   }
+
+  public static User getProcessUser() {
+    return processUser;
+  }
 }
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 74ff546..d240fe0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -40,6 +41,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -49,10 +51,12 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSHDFSUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.hadoop.security.AccessControlException;
 
 /**
  * View to an on-disk Region.
@@ -90,6 +94,9 @@ public class HRegionFileSystem {
   private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
   private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
 
+  private String filesOwner;
+  private String filesGroup;
+
   /**
    * Create a view to the on-disk region
    * @param conf the {@link Configuration} to use
@@ -455,9 +462,69 @@ public class HRegionFileSystem {
       srcPath = tmpPath;
     }
 
+    checkAndUpdateFileOwnerAndPermission(srcPath);
     return commitStoreFile(familyName, srcPath, seqNum, true);
   }
 
+  /**
+   * Ensures that the bulk-loaded file has the correct owner:group for hbase,
+   * or at least 777 permissions.
+   */
+  private void checkAndUpdateFileOwnerAndPermission(final Path srcPath)
+      throws IOException {
+    // get the bulk-loaded file status
+    FileStatus fileStatus = fs.getFileStatus(srcPath);
+
+    // SecureBulkLoadEndpoint should get us a 777 permission.
+    if (fileStatus.getPermission().equals(FSUtils.PERM_ALL_ACCESS)) {
+      // nothing to do, we have full access on the file
+      return;
+    }
+
+    // In case we didn't run SecureBulkLoadEndpoint, or the user didn't chmod the files,
+    // try to find out which owner and group each file should have.
+    if (filesOwner == null || filesGroup == null) {
+      // get the user and group that are supposed to be set on the files from the region dir
+      FileStatus regionDirStatus = fs.getFileStatus(getRegionDir());
+      filesOwner = regionDirStatus.getOwner();
+      filesGroup = regionDirStatus.getGroup();
+    }
+
+    // check if the file already has the correct owner
+    if (fileStatus.getOwner().equals(filesOwner) && fileStatus.getGroup().equals(filesGroup)) {
+      // nothing to do, we are already the owner or we have full access
+      return;
+    }
+
+    if (Superusers.getProcessUser() == null) {
+      // NOTE: This should happen only in testing, when Superusers.initialize() is not called
+      LOG.warn("No process-user information available. Skipping file permission update. "
+          + fileStatus);
+      return;
+    }
+
+    // try to change the owner, group and permission (last hope, assuming hbase is a superuser)
+    try {
+      Superusers.getProcessUser().runAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
+          fs.setOwner(srcPath, filesOwner, filesGroup);
+          fs.setPermission(srcPath, perms);
+          return null;
+        }
+      });
+    } catch (AccessControlException e) {
+      String msg = String.format("The bulk-loaded file must be owned by %s:%s or have 777 mode. "
+          + "You can change the owner/permission manually before bulk-loading, "
+          + "or enable the SecureBulkLoadEndpoint. %s", filesOwner, filesGroup, fileStatus);
+      LOG.error(msg);
+      throw new DoNotRetryIOException(msg, e);
+    } catch (InterruptedException e) {
+      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+    }
+  }
+
   // ===========================================================================
   //  Splits Helpers
   // ===========================================================================
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 18c86c8..b313154 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -93,6 +93,8 @@ public abstract class FSUtils {
   /** Full access permissions (starting point for a umask) */
   public static final String FULL_RWX_PERMISSIONS = "777";
 
+  public static final FsPermission PERM_ALL_ACCESS = FsPermission.valueOf("-rwxrwxrwx");
+
   private static final String THREAD_POOLSIZE = "hbase.client.localityCheck.threadPoolSize";
   private static final int DEFAULT_THREAD_POOLSIZE = 2;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
index 5f792fa..7ae019b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.security.Superusers;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -237,4 +239,56 @@ public class TestHRegionFileSystem {
 
     fs.delete(rootDir, true);
   }
+
+  @Test
+  public void testBulkloadFilesOwner() throws Exception {
+    final Configuration conf = TEST_UTIL.getConfiguration();
+    Superusers.initialize(conf);
+
+    final String TEST_OWNER = "testu";
+    final String TEST_GROUP = "testg";
+    User.createUserForTesting(conf, TEST_OWNER, new String[]{TEST_GROUP});
+
+    final FileSystem fs = TEST_UTIL.startMiniDFSCluster(1).getFileSystem();
+    final Path rootDir = new Path("/testBulkloadFilesOwner");
+
+    final String familyName = "cf";
+    final HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
+    try {
+      HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);
+
+      // Bulk-load a user-owned 700 file; hbase should take ownership of it
+      Path userBulkFile = new Path(rootDir, "test.bulk");
+      FSUtils.create(fs, userBulkFile, new FsPermission("700"), true).close();
+      fs.setOwner(userBulkFile, TEST_OWNER, TEST_GROUP);
+      FileStatus userStatus = fs.getFileStatus(userBulkFile);
+      LOG.debug("user file status: " + userStatus);
+      assertEquals(TEST_OWNER, userStatus.getOwner());
+      assertEquals(TEST_GROUP, userStatus.getGroup());
+
+      Path hbaseBulkFile = regionFs.bulkLoadStoreFile(familyName, userBulkFile, 1);
+      FileStatus hbaseStatus = fs.getFileStatus(hbaseBulkFile);
+      LOG.debug("hbase file status: " + hbaseStatus);
+      assertEquals(Superusers.getProcessUser().getShortName(), hbaseStatus.getOwner());
+      assertFalse(userStatus.getOwner().equals(hbaseStatus.getOwner()));
+      assertFalse(userStatus.getGroup().equals(hbaseStatus.getGroup()));
+
+      // Check that user:group is preserved with 777 permission (like SecureBulkLoadEndpoint does)
+      FSUtils.create(fs, userBulkFile, new FsPermission("777"), true).close();
+      fs.setOwner(userBulkFile, TEST_OWNER, TEST_GROUP);
+      userStatus = fs.getFileStatus(userBulkFile);
+      LOG.debug("user file status: " + userStatus);
+      assertEquals(TEST_OWNER, userStatus.getOwner());
+      assertEquals(TEST_GROUP, userStatus.getGroup());
+
+      hbaseBulkFile = regionFs.bulkLoadStoreFile(familyName, userBulkFile, 1);
+      hbaseStatus = fs.getFileStatus(hbaseBulkFile);
+      LOG.debug("hbase file status: " + hbaseStatus);
+      assertEquals(TEST_OWNER, hbaseStatus.getOwner());
+      assertEquals(TEST_GROUP, hbaseStatus.getGroup());
+    } finally {
+      fs.delete(rootDir, true);
+      TEST_UTIL.shutdownMiniDFSCluster();
+    }
+  }
 }
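
For reference, the privilege pattern that the new checkAndUpdateFileOwnerAndPermission()
helper relies on can be exercised on its own. The sketch below is not part of the patch:
the file path and the "hbase"/"hbase" owner:group are hypothetical placeholders, and it
assumes Superusers.initialize() has already run so a process user is available. On a real
cluster the setOwner() call only succeeds when the process user is an HDFS superuser,
which is why the patch converts an AccessControlException into a DoNotRetryIOException
instead of retrying.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.security.Superusers;

public class ProcessUserChownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Superusers.initialize(conf); // caches User.getCurrent() as the process user

    final FileSystem fs = FileSystem.get(conf);
    final Path bulkFile = new Path("/tmp/example.bulk"); // hypothetical path

    FileStatus status = fs.getFileStatus(bulkFile);
    // Skip the chown when the file is already world-accessible,
    // mirroring the PERM_ALL_ACCESS short-circuit in the patch.
    if (status.getPermission().equals(FsPermission.valueOf("-rwxrwxrwx"))) {
      return;
    }

    // Re-run the filesystem calls as the process (hbase) user,
    // the same runAs() pattern the patch uses.
    Superusers.getProcessUser().runAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        fs.setOwner(bulkFile, "hbase", "hbase");             // hypothetical owner:group
        fs.setPermission(bulkFile, new FsPermission("600")); // illustrative target mode
        return null;
      }
    });
  }
}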