diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java
index f1a3877..510c5e4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/security/Superusers.java
@@ -42,6 +42,7 @@ public final class Superusers {
 
   private static List<String> superUsers;
   private static List<String> superGroups;
+  private static User processUser;
 
   private Superusers(){}
 
@@ -55,17 +56,17 @@ public final class Superusers {
   public static void initialize(Configuration conf) throws IOException {
     superUsers = new ArrayList<>();
     superGroups = new ArrayList<>();
-    User user = User.getCurrent();
+    processUser = User.getCurrent();
 
-    if (user == null) {
+    if (processUser == null) {
       throw new IllegalStateException("Unable to obtain the current user, "
         + "authorization checks for internal operations will not work correctly!");
     }
 
     if (LOG.isTraceEnabled()) {
-      LOG.trace("Current user name is " + user.getShortName());
+      LOG.trace("Current user name is " + processUser.getShortName());
     }
-    String currentUser = user.getShortName();
+    String currentUser = processUser.getShortName();
     String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
     for (String name : superUserList) {
       if (AuthUtil.isGroupPrincipal(name)) {
@@ -104,4 +105,8 @@ public final class Superusers {
   public static List<String> getSuperUsers() {
     return superUsers;
   }
+
+  public static User getProcessUser() {
+    return processUser;
+  }
 }
\ No newline at end of file
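Reviewer note: the new getProcessUser() accessor simply exposes the login user cached by initialize(). A minimal sketch of the intended usage pattern (the wrapper class and the empty action body are hypothetical, and it assumes Superusers.initialize(conf) has already run, as the patch itself requires):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;

public class ProcessUserExample {
  // Runs the given action with the server's own (process-user) credentials.
  static void runAsProcessUser(PrivilegedExceptionAction<Void> action)
      throws IOException, InterruptedException {
    User processUser = Superusers.getProcessUser();
    if (processUser == null) {
      // initialize() was never called (e.g. in some unit tests); nothing to run as
      return;
    }
    // Calls made inside the action execute as the process user,
    // not as whatever request user the current thread is impersonating.
    processUser.runAs(action);
  }
}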
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 74ff546..ad452f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -49,6 +50,7 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSHDFSUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -90,6 +92,9 @@ public class HRegionFileSystem {
   private static final int DEFAULT_HDFS_CLIENT_RETRIES_NUMBER = 10;
   private static final int DEFAULT_BASE_SLEEP_BEFORE_RETRIES = 1000;
 
+  private String filesOwner;
+  private String filesGroup;
+
  /**
   * Create a view to the on-disk region
   * @param conf the {@link Configuration} to use
@@ -455,9 +460,52 @@ public class HRegionFileSystem {
       srcPath = tmpPath;
     }
 
+    srcPath = checkAndUpdateFileOwnerAndPermission(srcPath);
     return commitStoreFile(familyName, srcPath, seqNum, true);
   }
 
+  private Path checkAndUpdateFileOwnerAndPermission(final Path srcPath)
+      throws IOException {
+    FileStatus fileStatus = fs.getFileStatus(srcPath);
+    if (fileStatus.getOwner().equals(filesOwner) && fileStatus.getGroup().equals(filesGroup)) {
+      // nothing to do, the file already has the expected owner and group
+      return srcPath;
+    }
+
+    if (Superusers.getProcessUser() == null) {
+      // NOTE: This should happen only in tests, where Superusers.initialize() is not called
+      LOG.warn("No process-user information available. Skipping file permission update. "
+        + fileStatus);
+      return srcPath;
+    }
+
+    final Path dstPath = createTempName();
+    if (!fs.exists(dstPath.getParent()) && !createDir(dstPath.getParent())) {
+      throw new IOException("Failed creating " + dstPath.getParent());
+    }
+
+    if (!rename(srcPath, dstPath)) {
+      throw new IOException("Failed rename of " + srcPath + " to " + dstPath);
+    }
+
+    // change the owner, group and permission
+    try {
+      Superusers.getProcessUser().runAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY);
+          fs.setOwner(dstPath, filesOwner, filesGroup);
+          fs.setPermission(dstPath, perms);
+          return null;
+        }
+      });
+    } catch (InterruptedException e) {
+      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
+    }
+
+    return dstPath;
+  }
+
   // ===========================================================================
   //  Splits Helpers
   // ===========================================================================
@@ -882,6 +930,11 @@ public class HRegionFileSystem {
       throw new IOException("Unable to create region directory: " + regionDir);
     }
 
+    // get the user and group that are supposed to be set on the files
+    FileStatus regionDirStatus = fs.getFileStatus(regionDir);
+    regionFs.filesOwner = regionDirStatus.getOwner();
+    regionFs.filesGroup = regionDirStatus.getGroup();
+
     // Write HRI to a file in case we need to recover hbase:meta
     regionFs.writeRegionInfoOnFilesystem(false);
     return regionFs;
@@ -902,12 +955,19 @@ public class HRegionFileSystem {
     HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo);
     Path regionDir = regionFs.getRegionDir();
 
-    if (!fs.exists(regionDir)) {
+    FileStatus regionDirStatus;
+    try {
+      regionDirStatus = fs.getFileStatus(regionDir);
+    } catch (FileNotFoundException e) {
       LOG.warn("Trying to open a region that does not exist on disk: " + regionDir);
       throw new IOException("The specified region does not exist on disk: " + regionDir);
     }
 
     if (!readOnly) {
+      // get the user and group that are supposed to be set on the files
+      regionFs.filesOwner = regionDirStatus.getOwner();
+      regionFs.filesGroup = regionDirStatus.getGroup();
+
       // Cleanup temporary directories
       regionFs.cleanupTempDir();
       regionFs.cleanupSplitsDir();
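Reviewer note: stripped of the HBase plumbing, the new checkAndUpdateFileOwnerAndPermission() boils down to the following (a simplified sketch, not the actual method; expectedOwner/expectedGroup stand in for the cached region-directory owner and group, tmpDir for createTempName(), and the fixed 0644 mode for the umask-derived FsPermission). Since changing file ownership on HDFS is a privileged operation and the bulk-load commit can run under the requesting client's credentials, the patch wraps the equivalent of the last two calls in Superusers.getProcessUser().runAs(...):

import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ReownSketch {
  // Move a user-supplied file into an HBase-owned location and re-own it,
  // so the committed store file does not keep the bulk-load user's ownership.
  static Path reownIfNeeded(FileSystem fs, Path srcPath, Path tmpDir,
      String expectedOwner, String expectedGroup) throws IOException {
    FileStatus st = fs.getFileStatus(srcPath);
    if (st.getOwner().equals(expectedOwner) && st.getGroup().equals(expectedGroup)) {
      return srcPath;  // already owned as expected, nothing to do
    }
    Path dst = new Path(tmpDir, srcPath.getName());
    if (!fs.rename(srcPath, dst)) {
      throw new IOException("Failed rename of " + srcPath + " to " + dst);
    }
    // On HDFS only a privileged user may reassign ownership, hence the
    // runAs(processUser) wrapper in the actual patch.
    fs.setOwner(dst, expectedOwner, expectedGroup);
    fs.setPermission(dst, new FsPermission((short) 0644));
    return dst;
  }
}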
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
index 5f792fa..df1f0a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java
@@ -27,6 +27,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.Collection;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -39,6 +40,8 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.security.Superusers;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -237,4 +240,41 @@ public class TestHRegionFileSystem {
     fs.delete(rootDir, true);
   }
 
+
+  @Test
+  public void testBulkloadFilesOwner() throws Exception {
+    final Configuration conf = TEST_UTIL.getConfiguration();
+    Superusers.initialize(conf);
+
+    final String TEST_OWNER = "testu";
+    final String TEST_GROUP = "testg";
+    User.createUserForTesting(conf, TEST_OWNER, new String[]{TEST_GROUP});
+
+    final FileSystem fs = TEST_UTIL.startMiniDFSCluster(1).getFileSystem();
+    final Path rootDir = new Path("/testBulkloadFilesOwner");
+
+    final String familyName = "cf";
+    final HRegionInfo hri = new HRegionInfo(TableName.valueOf("TestTable"));
+    try {
+      HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(conf, fs, rootDir, hri);
+
+      Path userBulkFile = new Path(rootDir, "test.bulk");
+      FSUtils.create(fs, userBulkFile, new FsPermission("777"), true).close();
+      fs.setOwner(userBulkFile, TEST_OWNER, TEST_GROUP);
+      FileStatus userStatus = fs.getFileStatus(userBulkFile);
+      LOG.debug("user file status: " + userStatus);
+      assertEquals(TEST_OWNER, userStatus.getOwner());
+      assertEquals(TEST_GROUP, userStatus.getGroup());
+
+      Path hbaseBulkFile = regionFs.bulkLoadStoreFile(familyName, userBulkFile, 1);
+      FileStatus hbaseStatus = fs.getFileStatus(hbaseBulkFile);
+      LOG.debug("hbase file status: " + hbaseStatus);
+      assertEquals(Superusers.getProcessUser().getShortName(), hbaseStatus.getOwner());
+      assertFalse(userStatus.getOwner().equals(hbaseStatus.getOwner()));
+      assertFalse(userStatus.getGroup().equals(hbaseStatus.getGroup()));
+    } finally {
+      fs.delete(rootDir, true);
+      TEST_UTIL.shutdownMiniDFSCluster();
+    }
+  }
 }
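Reviewer note: the test's fs.setOwner(userBulkFile, "testu", "testg") succeeds even though no such OS accounts exist, because the JVM user that starts a MiniDFSCluster is that cluster's HDFS superuser and may assign ownership to arbitrary principals. A standalone sketch of that behaviour (class name and path are made up):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniDfsOwnerSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // The process that starts the NameNode is the HDFS superuser for
    // this mini cluster, so chown to synthetic users is permitted.
    FileSystem fs = util.startMiniDFSCluster(1).getFileSystem();
    Path p = new Path("/owned-by-test-user");
    fs.create(p).close();
    fs.setOwner(p, "testu", "testg");
    System.out.println(fs.getFileStatus(p));  // reports owner testu, group testg
    util.shutdownMiniDFSCluster();
  }
}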