diff --git src/main/java/org/apache/hadoop/hbase/HConstants.java src/main/java/org/apache/hadoop/hbase/HConstants.java index e60ce04..1624fea 100644 --- src/main/java/org/apache/hadoop/hbase/HConstants.java +++ src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -606,6 +606,11 @@ public final class HConstants { /** Host name of the local machine */ public static final String LOCALHOST = "localhost"; + /** Enable file permission modification from standard hbase */ + public static final String ENABLE_DATA_FILE_UMASK = "hbase.data.umask.enable"; + /** File permission umask to use when creating hbase data files */ + public static final String DATA_FILE_UMASK_KEY = "hbase.data.umask"; + private HConstants() { // Can't be instantiated with this ctor. } diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java index 9e7e624..10cb894 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java @@ -30,10 +30,12 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.KeyValue.KeyComparator; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.Writable; @@ -266,7 +268,9 @@ public abstract class AbstractHFileWriter extends SchemaConfigured /** A helper method to create HFile output streams in constructors */ protected static FSDataOutputStream createOutputStream(Configuration conf, FileSystem fs, Path path) throws IOException { - return fs.create(path, FsPermission.getDefault(), true, + FsPermission perms = FSUtils.getFilePermissions(fs, conf, + HConstants.DATA_FILE_UMASK_KEY); + return fs.create(path, perms, true, fs.getConf().getInt("io.file.buffer.size", 4096), fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index 76ff422..19dcb06 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -65,6 +65,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.DroppedSnapshotException; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -78,7 +79,6 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.NotServingRegionException; import org.apache.hadoop.hbase.UnknownScannerException; import org.apache.hadoop.hbase.client.Append; -import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Increment; @@ -88,13 +88,13 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Row; import 
org.apache.hadoop.hbase.client.RowLock; +import org.apache.hadoop.hbase.client.RowMutations; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.coprocessor.Exec; import org.apache.hadoop.hbase.client.coprocessor.ExecResult; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.filter.IncompatibleFilterException; -import org.apache.hadoop.hbase.filter.NullComparator; import org.apache.hadoop.hbase.filter.WritableByteArrayComparable; import org.apache.hadoop.hbase.io.HeapSize; import org.apache.hadoop.hbase.io.TimeRange; @@ -339,6 +339,9 @@ public class HRegion implements HeapSize { // , Writable{ public static final ConcurrentMap> timeVaryingMetrics = new ConcurrentHashMap>(); + + /** Configuration key for the permissions to use when creating the .regioninfo */ + private static final String HREGION_INFO_PERMISSION_KEY = "hbase.hregion.info.permissions"; public static void incrNumericMetric(String key, long amount) { AtomicLong oldVal = numericMetrics.get(key); @@ -761,8 +764,17 @@ public class HRegion implements HeapSize { // , Writable{ // create but before close. If we don't successfully close the file, // subsequent region reopens will fail the below because create is // registered in NN. + + // first check to get the permissions + FsPermission perms = FSUtils.getFilePermissions(fs, conf, + HConstants.DATA_FILE_UMASK_KEY); + LOG.debug("Creating .regioninfo with perms: " + perms); + + // and then create the file Path tmpPath = new Path(getTmpDir(), REGIONINFO_FILE); - FSDataOutputStream out = this.fs.create(tmpPath, true); + FSDataOutputStream out = fs.create(tmpPath, perms, true, + fs.getConf().getInt("io.file.buffer.size", 4096), + fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); try { this.regionInfo.write(out); out.write('\n'); diff --git src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 62cf6ac..aa4e115 100644 --- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -39,10 +39,11 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableExistsException; /** @@ -205,7 +206,10 @@ public class FSTableDescriptors implements TableDescriptors { if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) { throw new NotImplementedException(); } - if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd); + if (!this.fsreadonly) { + updateHTableDescriptor(HBaseConfiguration.create(), this.fs, + this.rootdir, htd); + } long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString()); this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd)); } @@ -414,11 +418,10 @@ public class FSTableDescriptors implements TableDescriptors { * @return New tableinfo or null if we failed update. * @throws IOException Thrown if failed update. 
*/ - static Path updateHTableDescriptor(FileSystem fs, Path rootdir, - HTableDescriptor hTableDescriptor) - throws IOException { + static Path updateHTableDescriptor(Configuration conf, FileSystem fs, + Path rootdir, HTableDescriptor hTableDescriptor) throws IOException { Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName()); - Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir, + Path p = writeTableDescriptor(conf, fs, hTableDescriptor, tableDir, getTableInfoPath(fs, tableDir)); if (p == null) throw new IOException("Failed update"); LOG.info("Updated tableinfo=" + p); @@ -447,7 +450,8 @@ public class FSTableDescriptors implements TableDescriptors { * @return Descriptor file or null if we failed write. * @throws IOException */ - private static Path writeTableDescriptor(final FileSystem fs, + private static Path writeTableDescriptor(final Configuration conf, + final FileSystem fs, final HTableDescriptor hTableDescriptor, final Path tableDir, final FileStatus status) throws IOException { @@ -474,7 +478,7 @@ public class FSTableDescriptors implements TableDescriptors { continue; } try { - writeHTD(fs, p, hTableDescriptor); + writeHTD(conf, fs, p, hTableDescriptor); tableInfoPath = getTableInfoFileName(tableDir, sequenceid); if (!fs.rename(p, tableInfoPath)) { throw new IOException("Failed rename of " + p + " to " + tableInfoPath); @@ -499,10 +503,11 @@ public class FSTableDescriptors implements TableDescriptors { return tableInfoPath; } - private static void writeHTD(final FileSystem fs, final Path p, - final HTableDescriptor htd) - throws IOException { - FSDataOutputStream out = fs.create(p, false); + private static void writeHTD(final Configuration conf, final FileSystem fs, + final Path p, final HTableDescriptor htd) throws IOException { + FsPermission perm = FSUtils.getFilePermissions(fs, conf, + HConstants.DATA_FILE_UMASK_KEY); + FSDataOutputStream out = FSUtils.create(conf, fs, p, perm, false); try { htd.write(out); out.write('\n'); @@ -580,8 +585,10 @@ public class FSTableDescriptors implements TableDescriptors { } } } - Path p = writeTableDescriptor(fs, htableDescriptor, - FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()), status); + Path p = writeTableDescriptor(HBaseConfiguration.create(), fs, + htableDescriptor, + FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()), + status); return p != null; } } diff --git src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index d2d7efe..c355b91 100644 --- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; @@ -60,7 +61,9 @@ import org.apache.hadoop.util.StringUtils; @InterfaceStability.Evolving public abstract class FSUtils { private static final Log LOG = LogFactory.getLog(FSUtils.class); - + + private static final String FULL_RWX_PERMISSIONS = "777"; + protected FSUtils() { super(); } @@ -105,21 +108,90 @@ public abstract class FSUtils { } /** - * Create file. 
- * @param fs filesystem object - * @param p path to create - * @return Path - * @throws IOException e + * Create the specified file on the filesystem. By default, this will: + *
<ol> + * <li>overwrite the file if it exists</li> + * <li>apply the umask in the configuration (if it is enabled)</li> + * <li>use the fs configured buffer size (or {@value DEFAULT_BUFFER_SIZE} if + * not set)</li> + * <li>use the default replication</li> + * <li>use the default block size</li> + * <li>not track progress</li> + * </ol>
+ * + * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @return output stream to the created file + * @throws IOException if the file cannot be created + */ + public static FSDataOutputStream create(Configuration conf, FileSystem fs, + Path p, FsPermission perm) throws IOException { + return create(conf, fs, p, perm, true); + } + + /** + * Create the specified file on the filesystem. By default, this will: + *
<ol> + * <li>apply the umask in the configuration (if it is enabled)</li> + * <li>use the fs configured buffer size (or {@value DEFAULT_BUFFER_SIZE} if + * not set)</li> + * <li>use the default replication</li> + * <li>use the default block size</li> + * <li>not track progress</li> + * </ol>
+ * + * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @param perm + * @param overwrite Whether or not the created file should be overwritten. + * @return output stream to the created file + * @throws IOException if the file cannot be created */ - public static Path create(final FileSystem fs, final Path p) - throws IOException { - if (fs.exists(p)) { - throw new IOException("File already exists " + p.toString()); - } - if (!fs.createNewFile(p)) { - throw new IOException("Failed create of " + p); + public static FSDataOutputStream create(Configuration conf, FileSystem fs, + Path path, FsPermission perm, boolean overwrite) throws IOException { + LOG.info("--Creating file:" + path + "with permission:" + perm); + + return fs.create(path, perm, overwrite, + fs.getConf().getInt("io.file.buffer.size", 4096), + fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); + } + + /** + * Get the file permissions specified in the configuration, if they are + * enabled. + * + * @param fs filesystem that the file will be created on. + * @param conf configuration to read for determining if permissions are + * enabled and which to use + * @param permssionConfKey property key in the configuration to use when + * finding the permission + * @return the permission to use when creating a new file on the fs. If + * special permissions are not specified in the configuration, then + * the default permissions on the the fs will be returned. + */ + public static FsPermission getFilePermissions(final FileSystem fs, + final Configuration conf, final String permssionConfKey) { + boolean enablePermissions = conf.getBoolean( + HConstants.ENABLE_DATA_FILE_UMASK, false); + + if (enablePermissions) { + try { + FsPermission perm = new FsPermission(FULL_RWX_PERMISSIONS); + FsPermission umask = new FsPermission(conf.get(permssionConfKey)); + return perm.applyUMask(umask); + } catch (IllegalArgumentException e) { + LOG.warn( + "Incorrect umask attempted to be created: " + + conf.get(permssionConfKey) + + ", using default file permissions.", e); + return FsPermission.getDefault(); + } catch (NullPointerException e) { + LOG.warn("No permissions stored in conf for " + permssionConfKey + + ", using default file permissions."); + return FsPermission.getDefault(); + } } - return p; + return FsPermission.getDefault(); } /** diff --git src/main/resources/hbase-default.xml src/main/resources/hbase-default.xml index 9277e0c..817e825 100644 --- src/main/resources/hbase-default.xml +++ src/main/resources/hbase-default.xml @@ -869,4 +869,18 @@ value to 0. 
+ <property> + <name>hbase.data.umask.enable</name> + <value>false</value> + <description>If true, file permissions will be assigned + to the files written by the regionserver + </description> + </property> + <property> + <name>hbase.data.umask</name> + <value>000</value> + <description>File permissions that should be used to write data + files when hbase.data.umask.enable is true + </description> + </property>
diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index 0db4d42..7aed3f7 100644 --- src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import java.io.FileNotFoundException; import java.io.IOException; @@ -28,7 +32,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableExistsException; import org.junit.Test; import org.junit.experimental.categories.Category;
@@ -62,7 +72,8 @@ public class TestFSTableDescriptors { FileStatus [] statuses = fs.listStatus(testdir); assertTrue("statuses.length="+statuses.length, statuses.length == 1); for (int i = 0; i < 10; i++) { - FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + FSTableDescriptors.updateHTableDescriptor(UTIL.getConfiguration(), fs, + testdir, htd); } statuses = fs.listStatus(testdir); assertTrue(statuses.length == 1); @@ -76,14 +87,17 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo"); HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo"); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + Path p0 = FSTableDescriptors.updateHTableDescriptor( + UTIL.getConfiguration(), fs, testdir, htd); int i0 = FSTableDescriptors.getTableInfoSequenceid(p0); - Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + Path p1 = FSTableDescriptors.updateHTableDescriptor( + UTIL.getConfiguration(), fs, testdir, htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p0)); int i1 = FSTableDescriptors.getTableInfoSequenceid(p1); assertTrue(i1 == i0 + 1); - Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + Path p2 = FSTableDescriptors.updateHTableDescriptor( + UTIL.getConfiguration(), fs, testdir, htd); // Assert we cleaned up the old file.
assertTrue(!fs.exists(p1)); int i2 = FSTableDescriptors.getTableInfoSequenceid(p2); @@ -183,7 +197,8 @@ public class TestFSTableDescriptors { for (int i = 0; i < count; i++) { HTableDescriptor htd = new HTableDescriptor(name + i); htd.addFamily(new HColumnDescriptor("" + i)); - FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd); + FSTableDescriptors.updateHTableDescriptor(UTIL.getConfiguration(), fs, + rootdir, htd); } // Wait a while so mod time we write is for sure different. Thread.sleep(100); diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java index e2611e6..7d01c6a 100644 --- src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java +++ src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java @@ -19,14 +19,21 @@ */ package org.apache.hadoop.hbase.util; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -146,6 +153,17 @@ public class TestFSUtils { } + @Test + public void testPermMask() throws Exception { + Configuration conf = HBaseConfiguration.create(); + conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true); + conf.setStrings(HConstants.DATA_FILE_UMASK_KEY, "077"); + + FileSystem fs = FileSystem.get(conf); + FsPermission filePerm = FSUtils.getFilePermissions(fs, conf, + HConstants.DATA_FILE_UMASK_KEY); + assertEquals(new FsPermission("700"), filePerm); + } @org.junit.Rule public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
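
For reference, a minimal sketch (not part of the patch) of how the new umask settings are exercised. The constants, FSUtils.getFilePermissions, and the 077 -> 700 result come from the hunks and the testPermMask test above; the standalone UmaskExample wrapper class is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.FSUtils;

public class UmaskExample {
  public static void main(String[] args) throws Exception {
    // Equivalent of setting the two new properties in hbase-site.xml:
    // enable umask handling and mask out the group/other bits.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean(HConstants.ENABLE_DATA_FILE_UMASK, true);
    conf.set(HConstants.DATA_FILE_UMASK_KEY, "077");

    // getFilePermissions applies the configured umask to full rwx (777),
    // so "077" yields "700"; if the feature is disabled or the umask is
    // invalid/missing, it falls back to FsPermission.getDefault().
    FileSystem fs = FileSystem.get(conf);
    FsPermission perm = FSUtils.getFilePermissions(fs, conf,
        HConstants.DATA_FILE_UMASK_KEY);
    System.out.println(perm); // prints rwx------
  }
}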