diff --git src/main/java/org/apache/hadoop/hbase/HConstants.java src/main/java/org/apache/hadoop/hbase/HConstants.java index e60ce04..a60c6c1 100644 --- src/main/java/org/apache/hadoop/hbase/HConstants.java +++ src/main/java/org/apache/hadoop/hbase/HConstants.java @@ -605,6 +605,7 @@ public final class HConstants { /** Host name of the local machine */ public static final String LOCALHOST = "localhost"; + public static final String ENABLE_FILE_PERMISSIONS = "hbase.file.permissions.enable"; private HConstants() { // Can't be instantiated with this ctor. diff --git src/main/java/org/apache/hadoop/hbase/io/Reference.java src/main/java/org/apache/hadoop/hbase/io/Reference.java index 6360059..f31e80a 100644 --- src/main/java/org/apache/hadoop/hbase/io/Reference.java +++ src/main/java/org/apache/hadoop/hbase/io/Reference.java @@ -24,6 +24,7 @@ import java.io.DataOutput; import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; @@ -125,9 +126,9 @@ public class Reference implements Writable { return r.equals(Range.top); } - public Path write(final FileSystem fs, final Path p) + public Path write(Configuration conf, final FileSystem fs, final Path p) throws IOException { - FSDataOutputStream out = fs.create(p, false); + FSDataOutputStream out = FSUtils.createFile(conf, fs, p, false); try { write(out); } finally { diff --git src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java index 9e7e624..ae04cd0 100644 --- src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java +++ src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java @@ -29,11 +29,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hbase.KeyValue.KeyComparator; import org.apache.hadoop.hbase.io.hfile.HFile.FileInfo; import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.Writable; @@ -44,6 +44,8 @@ import org.apache.hadoop.io.Writable; public abstract class AbstractHFileWriter extends SchemaConfigured implements HFile.Writer { + private static final String HFILE_PERMISSION_KEY = "hbase.hfile.store.permissions"; + /** Key previously appended. Becomes the last key in the file. 
*/ protected byte[] lastKeyBuffer = null; @@ -266,9 +268,6 @@ public abstract class AbstractHFileWriter extends SchemaConfigured /** A helper method to create HFile output streams in constructors */ protected static FSDataOutputStream createOutputStream(Configuration conf, FileSystem fs, Path path) throws IOException { - return fs.create(path, FsPermission.getDefault(), true, - fs.getConf().getInt("io.file.buffer.size", 4096), - fs.getDefaultReplication(), fs.getDefaultBlockSize(), - null); + return FSUtils.createFile(conf, fs, path); } } diff --git src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java index c726603..bce4ea4 100644 --- src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java +++ src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat.java @@ -41,6 +41,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.filecache.DistributedCache; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; @@ -48,12 +49,12 @@ import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.regionserver.TimeRangeTracker; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.WritableUtils; @@ -124,7 +125,8 @@ public class HFileOutputFormat extends FileOutputFormat> timeVaryingMetrics = new ConcurrentHashMap>(); + + /** Configuration key for the permissions to use when creating the .regioninfo */ + private static final String HREGION_INFO_PERMISSION_KEY = "hbase.hregion.info.permissions"; public static void incrNumericMetric(String key, long amount) { AtomicLong oldVal = numericMetrics.get(key); @@ -762,7 +764,8 @@ public class HRegion implements HeapSize { // , Writable{ // subsequent region reopens will fail the below because create is // registered in NN. 
Path tmpPath = new Path(getTmpDir(), REGIONINFO_FILE); - FSDataOutputStream out = this.fs.create(tmpPath, true); + FSDataOutputStream out = FSUtils.createFile(conf, fs, tmpPath); + try { this.regionInfo.write(out); out.write('\n'); @@ -3557,7 +3560,7 @@ public class HRegion implements HeapSize { // , Writable{ HTableDescriptor.getTableDir(rootDir, info.getTableName()); Path regionDir = HRegion.getRegionDir(tableDir, info.getEncodedName()); FileSystem fs = FileSystem.get(conf); - fs.mkdirs(regionDir); + FSUtils.mkdirs(conf, fs, regionDir); HLog effectiveHLog = hlog; if (hlog == null) { effectiveHLog = new HLog(fs, new Path(regionDir, HConstants.HREGION_LOGDIR_NAME), @@ -3769,18 +3772,21 @@ public class HRegion implements HeapSize { // , Writable{ /** * Make the directories for a specific column family - * + * * @param fs the file system - * @param tabledir base directory where region will live (usually the table dir) + * @param conf + * @param tabledir base directory where region will live (usually the table + * dir) * @param hri * @param colFamily the column family * @throws IOException */ - public static void makeColumnFamilyDirs(FileSystem fs, Path tabledir, + public static void makeColumnFamilyDirs(FileSystem fs, Configuration conf, + Path tabledir, final HRegionInfo hri, byte [] colFamily) throws IOException { Path dir = Store.getStoreHomedir(tabledir, hri.getEncodedName(), colFamily); - if (!fs.mkdirs(dir)) { + if (!FSUtils.mkdirs(conf, fs, dir)) { LOG.warn("Failed to create " + dir); } } @@ -3890,7 +3896,7 @@ public class HRegion implements HeapSize { // , Writable{ throw new IOException("Cannot merge; target file collision at " + newRegionDir); } - fs.mkdirs(newRegionDir); + FSUtils.mkdirs(conf, fs, newRegionDir); LOG.info("starting merge of regions: " + a + " and " + b + " into new region " + newRegionInfo.toString() + @@ -3904,7 +3910,7 @@ public class HRegion implements HeapSize { // , Writable{ byFamily = filesByFamily(byFamily, b.close()); for (Map.Entry> es : byFamily.entrySet()) { byte [] colFamily = es.getKey(); - makeColumnFamilyDirs(fs, tableDir, newRegionInfo, colFamily); + makeColumnFamilyDirs(fs, conf, tableDir, newRegionInfo, colFamily); // Because we compacted the source regions we should have no more than two // HStoreFiles per family and there will be no reference store List srcFiles = es.getValue(); diff --git src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java index 3f42efa..4e1bd23 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java @@ -247,7 +247,12 @@ public class SplitTransaction { } this.journal.add(JournalEntry.SET_SPLITTING_IN_ZK); - createSplitDir(this.parent.getFilesystem(), this.splitdir); + // create the splits directory + // if server is null, then don't worry do any special configuration + // otherwise, use the server's conf to configure the directory creation + createSplitDir(this.parent.getFilesystem(), + server == null ? new Configuration(false) : server.getConfiguration(), + this.splitdir); this.journal.add(JournalEntry.CREATE_SPLIT_DIR); List hstoreFilesToSplit = null; @@ -534,12 +539,14 @@ public class SplitTransaction { /** * @param fs Filesystem to use + * @pigurationaram conf * @param splitdir Directory to store temporary split data in - * @throws IOException If splitdir already exists or we fail - * to create it. 
+ * @throws IOException If splitdir already exists or we fail to + * create it. * @see #cleanupSplitDir(FileSystem, Path) */ - private static void createSplitDir(final FileSystem fs, final Path splitdir) + private static void createSplitDir(final FileSystem fs, Configuration conf, + final Path splitdir) throws IOException { if (fs.exists(splitdir)) { LOG.info("The " + splitdir @@ -549,7 +556,8 @@ public class SplitTransaction { + " before creating them again."); } } - if (!fs.mkdirs(splitdir)) throw new IOException("Failed create of " + splitdir); + if (!FSUtils.mkdirs(conf, fs, splitdir)) + throw new IOException("Failed create of " + splitdir); } private static void cleanupSplitDir(final FileSystem fs, final Path splitdir) @@ -640,10 +648,12 @@ public class SplitTransaction { byte [] family = sf.getFamily(); String encoded = this.hri_a.getEncodedName(); Path storedir = Store.getStoreHomedir(splitdir, encoded, family); - StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom); + StoreFile.split(this.parent.getConf(), fs, storedir, sf, this.splitrow, + Range.bottom); encoded = this.hri_b.getEncodedName(); storedir = Store.getStoreHomedir(splitdir, encoded, family); - StoreFile.split(fs, storedir, sf, this.splitrow, Range.top); + StoreFile.split(this.parent.getConf(), fs, storedir, sf, this.splitrow, + Range.top); } /** diff --git src/main/java/org/apache/hadoop/hbase/regionserver/Store.java src/main/java/org/apache/hadoop/hbase/regionserver/Store.java index d884598..6bd033b 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/Store.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/Store.java @@ -180,7 +180,7 @@ public class Store extends SchemaConfigured implements HeapSize { this.fs = fs; this.homedir = getStoreHomedir(basedir, info.getEncodedName(), family.getName()); if (!this.fs.exists(this.homedir)) { - if (!this.fs.mkdirs(this.homedir)) + if (!FSUtils.mkdirs(conf, fs, homedir)) throw new IOException("Failed create of: " + this.homedir.toString()); } this.region = region; diff --git src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java index 12ebc0a..cc78f5c 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java @@ -49,17 +49,17 @@ import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.HalfStoreFileReader; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; -import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.BlockType; +import org.apache.hadoop.hbase.io.hfile.CacheConfig; import org.apache.hadoop.hbase.io.hfile.Compression; import org.apache.hadoop.hbase.io.hfile.HFile; +import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.HFileScanner; import org.apache.hadoop.hbase.io.hfile.HFileWriterV1; import org.apache.hadoop.hbase.io.hfile.HFileWriterV2; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; -import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; -import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; import org.apache.hadoop.hbase.io.hfile.NoOpDataBlockEncoder; +import org.apache.hadoop.hbase.regionserver.metrics.SchemaConfigured; +import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics; import org.apache.hadoop.hbase.util.BloomFilter; import 
org.apache.hadoop.hbase.util.BloomFilterFactory; import org.apache.hadoop.hbase.util.BloomFilterWriter; @@ -781,7 +781,7 @@ public class StoreFile extends SchemaConfigured { } if (!fs.exists(dir)) { - fs.mkdirs(dir); + FSUtils.mkdirs(conf, fs, dir); } if (filePath == null) { @@ -846,24 +846,23 @@ public class StoreFile extends SchemaConfigured { /** * Write out a split reference. - * + * * Package local so it doesnt leak out of regionserver. - * + * + * @param configuration + * * @param fs * @param splitDir Presumes path format is actually - * SOME_DIRECTORY/REGIONNAME/FAMILY. + * SOME_DIRECTORY/REGIONNAME/FAMILY. * @param f File to split. * @param splitRow * @param range * @return Path to created reference. * @throws IOException */ - static Path split(final FileSystem fs, - final Path splitDir, - final StoreFile f, - final byte [] splitRow, - final Reference.Range range) - throws IOException { + static Path split(Configuration conf, final FileSystem fs, + final Path splitDir, final StoreFile f, final byte[] splitRow, + final Reference.Range range) throws IOException { // A reference to the bottom half of the hsf store file. Reference r = new Reference(splitRow, range); // Add the referred-to regions name as a dot separated suffix. @@ -874,7 +873,7 @@ public class StoreFile extends SchemaConfigured { // Write reference with same file id only with the other region name as // suffix and into the new region location (under same family). Path p = new Path(splitDir, f.getPath().getName() + "." + parentRegionName); - return r.write(fs, p); + return r.write(conf, fs, p); } diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java index b5049b1..5910e56 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java @@ -409,12 +409,12 @@ public class HLog implements Syncable { if (failIfLogDirExists && fs.exists(dir)) { throw new IOException("Target HLog directory already exists: " + dir); } - if (!fs.mkdirs(dir)) { + if (!FSUtils.mkdirs(conf, fs, dir)) { throw new IOException("Unable to mkdir " + dir); } this.oldLogDir = oldLogDir; if (!fs.exists(oldLogDir)) { - if (!fs.mkdirs(this.oldLogDir)) { + if (!FSUtils.mkdirs(conf, fs, oldLogDir)) { throw new IOException("Unable to mkdir " + this.oldLogDir); } } diff --git src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java index 5c8fc5e..5687836 100644 --- src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java +++ src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java @@ -386,7 +386,7 @@ public class HLogSplitter { in = getReader(fs, logfile, conf, skipErrors); } catch (CorruptedLogFileException e) { LOG.warn("Could not get reader, corrupted log file " + logPath, e); - ZKSplitLog.markCorrupted(rootDir, tmpname, fs); + ZKSplitLog.markCorrupted(conf, rootDir, tmpname, fs); isCorrupted = true; } if (in == null) { @@ -448,7 +448,7 @@ public class HLogSplitter { } } catch (CorruptedLogFileException e) { LOG.warn("Could not parse, corrupted log file " + logPath, e); - ZKSplitLog.markCorrupted(rootDir, tmpname, fs); + ZKSplitLog.markCorrupted(conf, rootDir, tmpname, fs); isCorrupted = true; } catch (IOException e) { e = RemoteExceptionHandler.checkIOException(e); @@ -549,7 +549,7 @@ public class HLogSplitter { } else { Path dstdir = dst.getParent(); if (!fs.exists(dstdir)) 
{ - if (!fs.mkdirs(dstdir)) LOG.warn("mkdir failed on " + dstdir); + if (!FSUtils.mkdirs(conf, fs, dstdir)) LOG.warn("mkdir failed on " + dstdir); } } fs.rename(src, dst); @@ -602,10 +602,10 @@ public class HLogSplitter { final Path corruptDir = new Path(conf.get(HConstants.HBASE_DIR), conf.get( "hbase.regionserver.hlog.splitlog.corrupt.dir", ".corrupt")); - if (!fs.mkdirs(corruptDir)) { + if (!FSUtils.mkdirs(conf, fs, corruptDir)) { LOG.info("Unable to mkdir " + corruptDir); } - fs.mkdirs(oldLogDir); + FSUtils.mkdirs(conf, fs, oldLogDir); // this method can get restarted or called multiple times for archiving // the same log files. @@ -641,18 +641,20 @@ public class HLogSplitter { /** * Path to a file under RECOVERED_EDITS_DIR directory of the region found in * logEntry named for the sequenceid in the passed - * logEntry: e.g. /hbase/some_table/2323432434/recovered.edits/2332. - * This method also ensures existence of RECOVERED_EDITS_DIR under the region - * creating it if necessary. + * logEntry: e.g. + * /hbase/some_table/2323432434/recovered.edits/2332. This method also ensures + * existence of RECOVERED_EDITS_DIR under the region creating it if necessary. + * * @param fs + * @param conf * @param logEntry * @param rootDir HBase root dir. * @return Path to file into which to dump split log edits. * @throws IOException */ - static Path getRegionSplitEditsPath(final FileSystem fs, + static Path getRegionSplitEditsPath(final FileSystem fs, Configuration conf, final Entry logEntry, final Path rootDir, boolean isCreate) - throws IOException { + throws IOException { Path tableDir = HTableDescriptor.getTableDir(rootDir, logEntry.getKey() .getTablename()); Path regiondir = HRegion.getRegionDir(tableDir, @@ -666,7 +668,8 @@ public class HLogSplitter { return null; } if (isCreate && !fs.exists(dir)) { - if (!fs.mkdirs(dir)) LOG.warn("mkdir failed on " + dir); + if (!FSUtils.mkdirs(conf, fs, dir)) + LOG.warn("mkdir failed on " + dir); } // Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure // region's replayRecoveredEdits will not delete it @@ -1073,7 +1076,7 @@ public class HLogSplitter { private WriterAndPath createWAP(byte[] region, Entry entry, Path rootdir, String tmpname, FileSystem fs, Configuration conf) throws IOException { - Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, + Path regionedits = getRegionSplitEditsPath(fs, conf, entry, rootdir, tmpname==null); if (regionedits == null) { return null; @@ -1123,7 +1126,9 @@ public class HLogSplitter { } Path dir = ret.getParent(); if (!fs.exists(dir)) { - if (!fs.mkdirs(dir)) LOG.warn("mkdir failed on " + dir); + if (!FSUtils.mkdirs(conf, fs, dir)) { + LOG.warn("mkdir failed on " + dir); + } } } catch (IOException e) { LOG.warn("Could not prepare temp staging area ", e); diff --git src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java index 62cf6ac..9d3cd91 100644 --- src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java +++ src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java @@ -39,10 +39,10 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableDescriptors; -import org.apache.hadoop.hbase.TableExistsException; /** @@ -205,7 
+205,9 @@ public class FSTableDescriptors implements TableDescriptors { if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(htd.getNameAsString())) { throw new NotImplementedException(); } - if (!this.fsreadonly) updateHTableDescriptor(this.fs, this.rootdir, htd); + if (!this.fsreadonly) + updateHTableDescriptor(HBaseConfiguration.create(), this.fs, + this.rootdir, htd); long modtime = getTableInfoModtime(this.fs, this.rootdir, htd.getNameAsString()); this.cache.put(htd.getNameAsString(), new TableDescriptorModtime(modtime, htd)); } @@ -414,11 +416,12 @@ public class FSTableDescriptors implements TableDescriptors { * @return New tableinfo or null if we failed update. * @throws IOException Thrown if failed update. */ - static Path updateHTableDescriptor(FileSystem fs, Path rootdir, + static Path updateHTableDescriptor(Configuration conf, FileSystem fs, + Path rootdir, HTableDescriptor hTableDescriptor) throws IOException { Path tableDir = FSUtils.getTablePath(rootdir, hTableDescriptor.getName()); - Path p = writeTableDescriptor(fs, hTableDescriptor, tableDir, + Path p = writeTableDescriptor(conf, fs, hTableDescriptor, tableDir, getTableInfoPath(fs, tableDir)); if (p == null) throw new IOException("Failed update"); LOG.info("Updated tableinfo=" + p); @@ -447,10 +450,9 @@ public class FSTableDescriptors implements TableDescriptors { * @return Descriptor file or null if we failed write. * @throws IOException */ - private static Path writeTableDescriptor(final FileSystem fs, - final HTableDescriptor hTableDescriptor, final Path tableDir, - final FileStatus status) - throws IOException { + private static Path writeTableDescriptor(final Configuration conf, + final FileSystem fs, final HTableDescriptor hTableDescriptor, + final Path tableDir, final FileStatus status) throws IOException { // Get temporary dir into which we'll first write a file to avoid // half-written file phenomeon. 
Path tmpTableDir = new Path(tableDir, ".tmp"); @@ -474,7 +476,7 @@ public class FSTableDescriptors implements TableDescriptors { continue; } try { - writeHTD(fs, p, hTableDescriptor); + writeHTD(conf, fs, p, hTableDescriptor); tableInfoPath = getTableInfoFileName(tableDir, sequenceid); if (!fs.rename(p, tableInfoPath)) { throw new IOException("Failed rename of " + p + " to " + tableInfoPath); @@ -499,10 +501,10 @@ public class FSTableDescriptors implements TableDescriptors { return tableInfoPath; } - private static void writeHTD(final FileSystem fs, final Path p, - final HTableDescriptor htd) - throws IOException { - FSDataOutputStream out = fs.create(p, false); + private static void writeHTD(final Configuration conf, final FileSystem fs, + final Path p, final HTableDescriptor htd) throws IOException { + + FSDataOutputStream out = FSUtils.createFile(conf, fs, p, false); try { htd.write(out); out.write('\n'); @@ -538,8 +540,8 @@ public class FSTableDescriptors implements TableDescriptors { final Configuration conf, boolean forceCreation) throws IOException { FileSystem fs = FSUtils.getCurrentFileSystem(conf); - return createTableDescriptor(fs, FSUtils.getRootDir(conf), htableDescriptor, - forceCreation); + return createTableDescriptor(conf, fs, FSUtils.getRootDir(conf), + htableDescriptor, forceCreation); } /** @@ -552,7 +554,8 @@ public class FSTableDescriptors implements TableDescriptors { public static boolean createTableDescriptor(FileSystem fs, Path rootdir, HTableDescriptor htableDescriptor) throws IOException { - return createTableDescriptor(fs, rootdir, htableDescriptor, false); + return createTableDescriptor(HBaseConfiguration.create(), fs, rootdir, + htableDescriptor, false); } /** @@ -560,14 +563,17 @@ public class FSTableDescriptors implements TableDescriptors { * forceCreation is true then even if previous table descriptor is present it * will be overwritten * + * @param conf TODO * @param fs - * @param htableDescriptor * @param rootdir + * @param htableDescriptor * @param forceCreation + * * @return True if we successfully created file. 
*/ - public static boolean createTableDescriptor(FileSystem fs, Path rootdir, - HTableDescriptor htableDescriptor, boolean forceCreation) + public static boolean createTableDescriptor(Configuration conf, + FileSystem fs, Path rootdir, HTableDescriptor htableDescriptor, + boolean forceCreation) throws IOException { FileStatus status = getTableInfoPath(fs, rootdir, htableDescriptor.getNameAsString()); @@ -580,7 +586,7 @@ public class FSTableDescriptors implements TableDescriptors { } } } - Path p = writeTableDescriptor(fs, htableDescriptor, + Path p = writeTableDescriptor(conf, fs, htableDescriptor, FSUtils.getTablePath(rootdir, htableDescriptor.getNameAsString()), status); return p != null; } diff --git src/main/java/org/apache/hadoop/hbase/util/FSUtils.java src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index d2d7efe..68ee13c 100644 --- src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -42,6 +42,8 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HDFSBlocksDistribution; import org.apache.hadoop.hbase.HRegionInfo; @@ -61,6 +63,14 @@ import org.apache.hadoop.util.StringUtils; public abstract class FSUtils { private static final Log LOG = LogFactory.getLog(FSUtils.class); + public static final String FILE_PERMISSION_UMASK_KEY = "hbase.permission.umask.file"; + public static final String DIRECTORY_PERMISSION_UMASK_KEY = "hbase.permission.umask.directory"; + /** + * Default file mask - removes write permission from group and world (standard + * posix) + */ + private static final String DEFAULT_FILE_UMASK = "022"; + protected FSUtils() { super(); } @@ -68,30 +78,32 @@ public abstract class FSUtils { public static FSUtils getInstance(FileSystem fs, Configuration conf) { String scheme = fs.getUri().getScheme(); if (scheme == null) { - LOG.warn("Could not find scheme for uri " + - fs.getUri() + ", default to hdfs"); + LOG.warn("Could not find scheme for uri " + fs.getUri() + + ", default to hdfs"); scheme = "hdfs"; } - Class fsUtilsClass = conf.getClass("hbase.fsutil." + - scheme + ".impl", FSHDFSUtils.class); // Default to HDFS impl - FSUtils fsUtils = (FSUtils)ReflectionUtils.newInstance(fsUtilsClass, conf); + Class fsUtilsClass = conf.getClass("hbase.fsutil." + scheme + ".impl", + FSHDFSUtils.class); // Default to HDFS impl + FSUtils fsUtils = (FSUtils) ReflectionUtils.newInstance(fsUtilsClass, conf); return fsUtils; } /** * Delete if exists. + * * @param fs filesystem object * @param dir directory to delete * @return True if deleted dir * @throws IOException e */ public static boolean deleteDirectory(final FileSystem fs, final Path dir) - throws IOException { + throws IOException { return fs.exists(dir) && fs.delete(dir, true); } /** - * Check if directory exists. If it does not, create it. + * Check if directory exists. If it does not, create it. + * * @param fs filesystem object * @param dir path to check * @return Path @@ -105,31 +117,145 @@ public abstract class FSUtils { } /** - * Create file. - * @param fs filesystem object - * @param p path to create - * @return Path - * @throws IOException e + * Make the given file and all non-existent parents into directories. Has the + * semantics of Unix 'mkdir -p'. 
Existence of the directory hierarchy is not + * an error. + *
+ * Will apply the umask stored in the configuration (if enabled) to the + * default directory permissions of all directories created. */ - public static Path create(final FileSystem fs, final Path p) - throws IOException { - if (fs.exists(p)) { - throw new IOException("File already exists " + p.toString()); - } - if (!fs.createNewFile(p)) { - throw new IOException("Failed create of " + p); + public static boolean mkdirs(final Configuration conf, final FileSystem fs, + final Path path) + throws IOException { + FsPermission perms = getDirectoryPermissions(fs, conf); + LOG.info("--Creating directory:" + path + "with permission:" + perms); + return fs.mkdirs(path, perms); + } + + /** + * Create the specified file on the filesystem. By default, this will: + *

+ * <ol>
+ * <li>overwrite the file if it exists</li>
+ * <li>apply the umask in the configuration (if it is enabled)</li>
+ * <li>use the fs configured buffer size (or {@value DEFAULT_BUFFER_SIZE} if not set)</li>
+ * <li>use the default replication</li>
+ * <li>use the default block size</li>
+ * <li>not track progress</li>
+ * </ol>
+ * + * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @return output stream to the created file + * @throws IOException if the file cannot be created + */ + public static FSDataOutputStream createFile(final Configuration conf, + final FileSystem fs, final Path path) throws IOException { + return createFile(conf, fs, path, true); + } + + /** + * Create the specified file on the filesystem. By default, this will: + *
+ * <ol>
+ * <li>apply the umask in the configuration (if it is enabled)</li>
+ * <li>use the fs configured buffer size (or {@value DEFAULT_BUFFER_SIZE} if not set)</li>
+ * <li>use the default replication</li>
+ * <li>use the default block size</li>
+ * <li>not track progress</li>
+ * </ol>
+ * + * @param fs {@link FileSystem} on which to write the file + * @param path {@link Path} to the file to write + * @param overwrite Whether or not the created file should be overwritten. + * @return output stream to the created file + * @throws IOException if the file cannot be created + */ + public static FSDataOutputStream createFile(Configuration conf, + FileSystem fs, Path path, + boolean overwrite) throws IOException { + FsPermission perms = getFilePermissions(fs, conf); + LOG.info("--Creating file:" + path + "with permission:" + perms); + + return fs.create(path, perms, overwrite, + fs.getConf().getInt("io.file.buffer.size", 4096), + fs.getDefaultReplication(), fs.getDefaultBlockSize(), null); + + } + + + /** + * Get the file permissions specified in the configuration, applying the umask + * if it is enabled. + * + * @param fs filesystem that the file will be created on. + * @param permssionConfKey property key in the configuration to use when + * finding the permission + * @param conf configuration to read for determining if permissions are + * enabled and which to use + * @return the permission to use when creating a new file on the fs. If + * special permissions are not specified in the configuration, then + * the default permissions on the the fs will be returned. + */ + public static FsPermission getFilePermissions(final FileSystem fs, + final Configuration conf) { + return getConfiguredPermission(fs, conf, FILE_PERMISSION_UMASK_KEY); + } + + /** + * Get the file permissions specified in the configuration, applying the umask + * it is enabled. + * + * @param fs filesystem that the file will be created on. + * @param permssionConfKey property key in the configuration to use when + * finding the permission + * @param conf configuration to read for determining if permissions are + * enabled and which to use + * @return the permission to use when creating a new file on the fs. If + * special permissions are not specified in the configuration, then + * the default permissions on the the fs will be returned. + */ + public static FsPermission getDirectoryPermissions(final FileSystem fs, + final Configuration conf) { + return getConfiguredPermission(fs, conf, DIRECTORY_PERMISSION_UMASK_KEY); + } + + /** + * Get a permission based on the default fs permission umasked by the value in + * the configuration, if it is enabled. 
+ * + * @param fs {@link FileSystem} to get the default permissions from + * @param conf to read the umask from + * @param confKey key to look in the configuration for the umask + * @return + */ + private static FsPermission getConfiguredPermission(final FileSystem fs, + final Configuration conf, final String confKey) { + boolean enablePermissions = conf.getBoolean( + HConstants.ENABLE_FILE_PERMISSIONS, false); + FsPermission perm = FsPermission.getDefault(); + if (enablePermissions) { + try { + LOG.debug("Starting with permission:" + perm); + FsPermission umask = new FsPermission(conf.get(confKey, + DEFAULT_FILE_UMASK)); + LOG.debug("Using umask: " + umask); + perm = perm.applyUMask(umask); + } catch (IllegalArgumentException e) { + LOG.error("Incorrect umask read from configuration, using default!", e); + } } - return p; + return perm; } /** * Checks to see if the specified file system is available - * + * * @param fs filesystem * @throws IOException e */ public static void checkFileSystemAvailable(final FileSystem fs) - throws IOException { + throws IOException { if (!(fs instanceof DistributedFileSystem)) { return; } @@ -145,7 +271,7 @@ public abstract class FSUtils { try { fs.close(); } catch (Exception e) { - LOG.error("file system close failed: ", e); + LOG.error("file system close failed: ", e); } IOException io = new IOException("File system is not available"); io.initCause(exception); @@ -153,39 +279,41 @@ public abstract class FSUtils { } /** - * Check whether dfs is in safemode. + * Check whether dfs is in safemode. + * * @param conf * @throws IOException */ - public static void checkDfsSafeMode(final Configuration conf) - throws IOException { + public static void checkDfsSafeMode(final Configuration conf) + throws IOException { boolean isInSafeMode = false; FileSystem fs = FileSystem.get(conf); if (fs instanceof DistributedFileSystem) { - DistributedFileSystem dfs = (DistributedFileSystem)fs; + DistributedFileSystem dfs = (DistributedFileSystem) fs; // Check whether dfs is on safemode. - isInSafeMode = dfs.setSafeMode(org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET); + isInSafeMode = dfs + .setSafeMode(org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET); } if (isInSafeMode) { - throw new IOException("File system is in safemode, it can't be written now"); + throw new IOException( + "File system is in safemode, it can't be written now"); } } - + /** * Verifies current version of file system - * + * * @param fs filesystem object * @param rootdir root hbase directory * @return null if no version file exists, version string otherwise. 
* @throws IOException e */ public static String getVersion(FileSystem fs, Path rootdir) - throws IOException { + throws IOException { Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); String version = null; if (fs.exists(versionFile)) { - FSDataInputStream s = - fs.open(versionFile); + FSDataInputStream s = fs.open(versionFile); try { version = DataInputStream.readUTF(s); } catch (EOFException eof) { @@ -197,52 +325,38 @@ public abstract class FSUtils { return version; } - /** - * Verifies current version of file system - * - * @param fs file system - * @param rootdir root directory of HBase installation - * @param message if true, issues a message on System.out - * - * @throws IOException e - */ - public static void checkVersion(FileSystem fs, Path rootdir, - boolean message) throws IOException { - checkVersion(fs, rootdir, message, 0, - HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); - } /** * Verifies current version of file system - * + * * @param fs file system * @param rootdir root directory of HBase installation * @param message if true, issues a message on System.out * @param wait wait interval * @param retries number of times to retry - * + * * @throws IOException e */ - public static void checkVersion(FileSystem fs, Path rootdir, - boolean message, int wait, int retries) throws IOException { + public static void checkVersion(Configuration conf, FileSystem fs, + Path rootdir, boolean message, + int wait, int retries) throws IOException { String version = getVersion(fs, rootdir); if (version == null) { if (!rootRegionExists(fs, rootdir)) { // rootDir is empty (no version file and no root region) // just create new version file (HBASE-1195) - FSUtils.setVersion(fs, rootdir, wait, retries); + FSUtils.setVersion(conf, fs, rootdir, wait, retries); return; } } else if (version.compareTo(HConstants.FILE_SYSTEM_VERSION) == 0) - return; + return; // version is deprecated require migration // Output on stdout so user sees it in terminal. - String msg = "File system needs to be upgraded." - + " You have version " + version - + " and I want version " + HConstants.FILE_SYSTEM_VERSION - + ". Run the '${HBASE_HOME}/bin/hbase migrate' script."; + String msg = "File system needs to be upgraded." + " You have version " + + version + " and I want version " + HConstants.FILE_SYSTEM_VERSION + + ". Run the '${HBASE_HOME}/bin/hbase migrate' script."; if (message) { System.out.println("WARNING! " + msg); } @@ -251,61 +365,62 @@ public abstract class FSUtils { /** * Sets version of file system - * + * * @param fs filesystem object * @param rootdir hbase root + * @param wait time to wait for retry + * @param retries number of times to retry before failing * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir) - throws IOException { - setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, 0, - HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); + public static void setVersion(Configuration conf, FileSystem fs, + Path rootdir, int wait, + int retries) throws IOException { + setVersion(conf, fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries); } /** - * Sets version of file system - * + * Sets version of file system. Uses the default configuration. 
+ * * @param fs filesystem object * @param rootdir hbase root - * @param wait time to wait for retry - * @param retries number of times to retry before failing * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir, int wait, int retries) - throws IOException { - setVersion(fs, rootdir, HConstants.FILE_SYSTEM_VERSION, wait, retries); + public static void setVersion(FileSystem fs, Path rootdir) throws IOException { + setVersion(HBaseConfiguration.create(), fs, rootdir, + HConstants.FILE_SYSTEM_VERSION, 0, + HConstants.DEFAULT_VERSION_FILE_WRITE_ATTEMPTS); } - /** * Sets version of file system - * - * @param fs filesystem object - * @param rootdir hbase root directory - * @param version version to set + * + * @param conf filesystem object + * @param fs hbase root directory + * @param rd version to set * @param wait time to wait for retry * @param retries number of times to retry before throwing an IOException * @throws IOException e */ - public static void setVersion(FileSystem fs, Path rootdir, String version, + public static void setVersion(Configuration conf, FileSystem fs, + Path rootdir, String version, int wait, int retries) throws IOException { Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); while (true) { try { - FSDataOutputStream s = fs.create(versionFile); + FSDataOutputStream s = FSUtils.createFile(conf, fs, versionFile); s.writeUTF(version); - LOG.debug("Created version file at " + rootdir.toString() + - " set its version at:" + version); + LOG.debug("Created version file at " + rootdir.toString() + + " set its version at:" + version); s.close(); return; } catch (IOException e) { if (retries > 0) { - LOG.warn("Unable to create version file at " + rootdir.toString() + - ", retrying: " + e.getMessage()); + LOG.warn("Unable to create version file at " + rootdir.toString() + + ", retrying: " + e.getMessage()); fs.delete(versionFile, false); try { if (wait > 0) { - Thread.sleep(wait); + Thread.sleep(wait); } } catch (InterruptedException ex) { // ignore @@ -320,6 +435,7 @@ public abstract class FSUtils { /** * Checks that a cluster ID file exists in the HBase root directory + * * @param fs the root directory FileSystem * @param rootdir the HBase root directory in HDFS * @param wait how long to wait between retries @@ -334,8 +450,9 @@ public abstract class FSUtils { return fs.exists(filePath); } catch (IOException ioe) { if (wait > 0) { - LOG.warn("Unable to check cluster ID file in " + rootdir.toString() + - ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe)); + LOG.warn("Unable to check cluster ID file in " + rootdir.toString() + + ", retrying in " + wait + "msec: " + + StringUtils.stringifyException(ioe)); try { Thread.sleep(wait); } catch (InterruptedException ie) { @@ -352,6 +469,7 @@ public abstract class FSUtils { /** * Returns the value of the unique cluster ID stored for this HBase instance. 
+ * * @param fs the root directory FileSystem * @param rootdir the path to the HBase root directory * @return the unique cluster identifier @@ -366,8 +484,8 @@ public abstract class FSUtils { try { clusterId = in.readUTF(); } catch (EOFException eof) { - LOG.warn("Cluster ID file "+idPath.toString()+" was empty"); - } finally{ + LOG.warn("Cluster ID file " + idPath.toString() + " was empty"); + } finally { in.close(); } } else { @@ -377,31 +495,35 @@ public abstract class FSUtils { } /** - * Writes a new unique identifier for this cluster to the "hbase.id" file - * in the HBase root directory + * Writes a new unique identifier for this cluster to the "hbase.id" file in + * the HBase root directory + * + * @param conf HBase {@link Configuration} for the cluster * @param fs the root directory FileSystem * @param rootdir the path to the HBase root directory * @param clusterId the unique identifier to store * @param wait how long (in milliseconds) to wait between retries * @throws IOException if writing to the FileSystem fails and no wait value */ - public static void setClusterId(FileSystem fs, Path rootdir, String clusterId, - int wait) throws IOException { + public static void setClusterId(Configuration conf, FileSystem fs, + Path rootdir, + String clusterId, int wait) throws IOException { while (true) { try { Path filePath = new Path(rootdir, HConstants.CLUSTER_ID_FILE_NAME); - FSDataOutputStream s = fs.create(filePath); + FSDataOutputStream s = FSUtils.createFile(conf, fs, filePath); s.writeUTF(clusterId); s.close(); if (LOG.isDebugEnabled()) { - LOG.debug("Created cluster ID file at " + filePath.toString() + - " with ID: " + clusterId); + LOG.debug("Created cluster ID file at " + filePath.toString() + + " with ID: " + clusterId); } return; } catch (IOException ioe) { if (wait > 0) { - LOG.warn("Unable to create cluster ID file in " + rootdir.toString() + - ", retrying in "+wait+"msec: "+StringUtils.stringifyException(ioe)); + LOG.warn("Unable to create cluster ID file in " + rootdir.toString() + + ", retrying in " + wait + "msec: " + + StringUtils.stringifyException(ioe)); try { Thread.sleep(wait); } catch (InterruptedException ie) { @@ -417,7 +539,7 @@ public abstract class FSUtils { /** * Verifies root directory path is a valid URI with a scheme - * + * * @param root root directory path * @return Passed root argument. * @throws IOException if not a valid URI with a scheme @@ -431,8 +553,8 @@ public abstract class FSUtils { } return root; } catch (URISyntaxException e) { - IOException io = new IOException("Root directory path is not a valid " + - "URI -- check your " + HConstants.HBASE_DIR + " configuration"); + IOException io = new IOException("Root directory path is not a valid " + + "URI -- check your " + HConstants.HBASE_DIR + " configuration"); io.initCause(e); throw io; } @@ -440,34 +562,37 @@ public abstract class FSUtils { /** * If DFS, check safe mode and if so, wait until we clear it. 
+ * * @param conf configuration * @param wait Sleep between retries * @throws IOException e */ - public static void waitOnSafeMode(final Configuration conf, - final long wait) - throws IOException { + public static void waitOnSafeMode(final Configuration conf, final long wait) + throws IOException { FileSystem fs = FileSystem.get(conf); - if (!(fs instanceof DistributedFileSystem)) return; - DistributedFileSystem dfs = (DistributedFileSystem)fs; + if (!(fs instanceof DistributedFileSystem)) + return; + DistributedFileSystem dfs = (DistributedFileSystem) fs; // Make sure dfs is not in safe mode - while (dfs.setSafeMode(org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET)) { + while (dfs + .setSafeMode(org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction.SAFEMODE_GET)) { LOG.info("Waiting for dfs to exit safe mode..."); try { Thread.sleep(wait); } catch (InterruptedException e) { - //continue + // continue } } } /** - * Return the 'path' component of a Path. In Hadoop, Path is an URI. This + * Return the 'path' component of a Path. In Hadoop, Path is an URI. This * method returns the 'path' component of a Path's URI: e.g. If a Path is * hdfs://example.org:9000/hbase_trunk/TestTable/compaction.dir, * this method returns /hbase_trunk/TestTable/compaction.dir. * This method is useful if you want to print out a Path without qualifying * Filesystem instance. + * * @param p Filesystem Path whose 'path' component we are to return. * @return Path portion of the Filesystem */ @@ -478,7 +603,7 @@ public abstract class FSUtils { /** * @param c configuration * @return Path to hbase root directory: i.e. hbase.rootdir from - * configuration as a qualified Path. + * configuration as a qualified Path. * @throws IOException e */ public static Path getRootDir(final Configuration c) throws IOException { @@ -489,63 +614,62 @@ public abstract class FSUtils { /** * Checks if root region exists - * + * * @param fs file system * @param rootdir root directory of HBase installation * @return true if exists * @throws IOException e */ public static boolean rootRegionExists(FileSystem fs, Path rootdir) - throws IOException { - Path rootRegionDir = - HRegion.getRegionDir(rootdir, HRegionInfo.ROOT_REGIONINFO); + throws IOException { + Path rootRegionDir = HRegion.getRegionDir(rootdir, + HRegionInfo.ROOT_REGIONINFO); return fs.exists(rootRegionDir); } /** * Compute HDFS blocks distribution of a given file, or a portion of the file + * * @param fs file system * @param status file status of the file * @param start start position of the portion - * @param length length of the portion + * @param length length of the portion * @return The HDFS blocks distribution - */ + */ static public HDFSBlocksDistribution computeHDFSBlocksDistribution( - final FileSystem fs, FileStatus status, long start, long length) - throws IOException { + final FileSystem fs, FileStatus status, long start, long length) + throws IOException { HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution(); - BlockLocation [] blockLocations = - fs.getFileBlockLocations(status, start, length); - for(BlockLocation bl : blockLocations) { - String [] hosts = bl.getHosts(); + BlockLocation[] blockLocations = fs.getFileBlockLocations(status, start, + length); + for (BlockLocation bl : blockLocations) { + String[] hosts = bl.getHosts(); long len = bl.getLength(); blocksDistribution.addHostsAndBlockWeight(hosts, len); } - + return blocksDistribution; } - - /** - * Runs through the hbase rootdir and checks all stores have only - 
* one file in them -- that is, they've been major compacted. Looks - * at root and meta tables too. + * Runs through the hbase rootdir and checks all stores have only one file in + * them -- that is, they've been major compacted. Looks at root and meta + * tables too. + * * @param fs filesystem * @param hbaseRootDir hbase root directory * @return True if this hbase install is major compacted. * @throws IOException e */ public static boolean isMajorCompacted(final FileSystem fs, - final Path hbaseRootDir) - throws IOException { + final Path hbaseRootDir) throws IOException { // Presumes any directory under hbase.rootdir is a table. - FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs)); + FileStatus[] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs)); for (FileStatus tableDir : tableDirs) { - // Skip the .log directory. All others should be tables. Inside a table, - // there are compaction.dir directories to skip. Otherwise, all else - // should be regions. Then in each region, should only be family - // directories. Under each of these, should be one file only. + // Skip the .log directory. All others should be tables. Inside a table, + // there are compaction.dir directories to skip. Otherwise, all else + // should be regions. Then in each region, should only be family + // directories. Under each of these, should be one file only. Path d = tableDir.getPath(); if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { continue; @@ -556,15 +680,15 @@ public abstract class FSUtils { if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) { continue; } - // Else its a region name. Now look in region for families. + // Else its a region name. Now look in region for families. FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs)); for (FileStatus familyDir : familyDirs) { Path family = familyDir.getPath(); // Now in family make sure only one file. FileStatus[] familyStatus = fs.listStatus(family); if (familyStatus.length > 1) { - LOG.debug(family.toString() + " has " + familyStatus.length + - " files."); + LOG.debug(family.toString() + " has " + familyStatus.length + + " files."); return false; } } @@ -577,13 +701,13 @@ public abstract class FSUtils { /** * Returns the total overall fragmentation percentage. Includes .META. and * -ROOT- as well. - * - * @param master The master defining the HBase root and file system. + * + * @param master The master defining the HBase root and file system. * @return A map for each table and its percentage. * @throws IOException When scanning the directory fails. */ public static int getTotalTableFragmentation(final HMaster master) - throws IOException { + throws IOException { Map map = getTableFragmentation(master); return map != null && map.size() > 0 ? map.get("-TOTAL-") : -1; } @@ -592,14 +716,13 @@ public abstract class FSUtils { * Runs through the HBase rootdir and checks how many stores for each table * have more than one file in them. Checks -ROOT- and .META. too. The total * percentage across all tables is stored under the special key "-TOTAL-". - * - * @param master The master defining the HBase root and file system. + * + * @param master The master defining the HBase root and file system. * @return A map for each table and its percentage. * @throws IOException When scanning the directory fails. 
*/ - public static Map getTableFragmentation( - final HMaster master) - throws IOException { + public static Map getTableFragmentation(final HMaster master) + throws IOException { Path path = getRootDir(master.getConfiguration()); // since HMaster.getFileSystem() is package private FileSystem fs = path.getFileSystem(master.getConfiguration()); @@ -610,26 +733,25 @@ public abstract class FSUtils { * Runs through the HBase rootdir and checks how many stores for each table * have more than one file in them. Checks -ROOT- and .META. too. The total * percentage across all tables is stored under the special key "-TOTAL-". - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @return A map for each table and its percentage. * @throws IOException When scanning the directory fails. */ - public static Map getTableFragmentation( - final FileSystem fs, final Path hbaseRootDir) - throws IOException { + public static Map getTableFragmentation(final FileSystem fs, + final Path hbaseRootDir) throws IOException { Map frags = new HashMap(); int cfCountTotal = 0; int cfFragTotal = 0; DirFilter df = new DirFilter(fs); // presumes any directory under hbase.rootdir is a table - FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df); + FileStatus[] tableDirs = fs.listStatus(hbaseRootDir, df); for (FileStatus tableDir : tableDirs) { - // Skip the .log directory. All others should be tables. Inside a table, - // there are compaction.dir directories to skip. Otherwise, all else - // should be regions. Then in each region, should only be family - // directories. Under each of these, should be one file only. + // Skip the .log directory. All others should be tables. Inside a table, + // there are compaction.dir directories to skip. Otherwise, all else + // should be regions. Then in each region, should only be family + // directories. Under each of these, should be one file only. Path d = tableDir.getPath(); if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { continue; @@ -666,39 +788,39 @@ public abstract class FSUtils { /** * Expects to find -ROOT- directory. + * * @param fs filesystem * @param hbaseRootDir hbase root directory * @return True if this a pre020 layout. * @throws IOException e */ public static boolean isPre020FileLayout(final FileSystem fs, - final Path hbaseRootDir) - throws IOException { - Path mapfiles = new Path(new Path(new Path(new Path(hbaseRootDir, "-ROOT-"), - "70236052"), "info"), "mapfiles"); + final Path hbaseRootDir) throws IOException { + Path mapfiles = new Path(new Path(new Path( + new Path(hbaseRootDir, "-ROOT-"), "70236052"), "info"), "mapfiles"); return fs.exists(mapfiles); } /** - * Runs through the hbase rootdir and checks all stores have only - * one file in them -- that is, they've been major compacted. Looks - * at root and meta tables too. This version differs from - * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a - * pre-0.20.0 hbase layout on the filesystem. Used migrating. + * Runs through the hbase rootdir and checks all stores have only one file in + * them -- that is, they've been major compacted. Looks at root and meta + * tables too. This version differs from + * {@link #isMajorCompacted(FileSystem, Path)} in that it expects a pre-0.20.0 + * hbase layout on the filesystem. Used migrating. 
+ * * @param fs filesystem * @param hbaseRootDir hbase root directory * @return True if this hbase install is major compacted. * @throws IOException e */ public static boolean isMajorCompactedPre020(final FileSystem fs, - final Path hbaseRootDir) - throws IOException { + final Path hbaseRootDir) throws IOException { // Presumes any directory under hbase.rootdir is a table. - FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs)); + FileStatus[] tableDirs = fs.listStatus(hbaseRootDir, new DirFilter(fs)); for (FileStatus tableDir : tableDirs) { // Inside a table, there are compaction.dir directories to skip. - // Otherwise, all else should be regions. Then in each region, should - // only be family directories. Under each of these, should be a mapfile + // Otherwise, all else should be regions. Then in each region, should + // only be family directories. Under each of these, should be a mapfile // and info directory and in these only one file. Path d = tableDir.getPath(); if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { @@ -710,33 +832,34 @@ public abstract class FSUtils { if (dd.getName().equals(HConstants.HREGION_COMPACTIONDIR_NAME)) { continue; } - // Else its a region name. Now look in region for families. + // Else its a region name. Now look in region for families. FileStatus[] familyDirs = fs.listStatus(dd, new DirFilter(fs)); for (FileStatus familyDir : familyDirs) { Path family = familyDir.getPath(); FileStatus[] infoAndMapfile = fs.listStatus(family); // Assert that only info and mapfile in family dir. if (infoAndMapfile.length != 0 && infoAndMapfile.length != 2) { - LOG.debug(family.toString() + - " has more than just info and mapfile: " + infoAndMapfile.length); + LOG.debug(family.toString() + + " has more than just info and mapfile: " + + infoAndMapfile.length); return false; } // Make sure directory named info or mapfile. for (int ll = 0; ll < 2; ll++) { - if (infoAndMapfile[ll].getPath().getName().equals("info") || - infoAndMapfile[ll].getPath().getName().equals("mapfiles")) + if (infoAndMapfile[ll].getPath().getName().equals("info") + || infoAndMapfile[ll].getPath().getName().equals("mapfiles")) continue; - LOG.debug("Unexpected directory name: " + - infoAndMapfile[ll].getPath()); + LOG.debug("Unexpected directory name: " + + infoAndMapfile[ll].getPath()); return false; } - // Now in family, there are 'mapfile' and 'info' subdirs. Just + // Now in family, there are 'mapfile' and 'info' subdirs. Just // look in the 'mapfile' subdir. 
- FileStatus[] familyStatus = - fs.listStatus(new Path(family, "mapfiles")); + FileStatus[] familyStatus = fs + .listStatus(new Path(family, "mapfiles")); if (familyStatus.length > 1) { - LOG.debug(family.toString() + " has " + familyStatus.length + - " files."); + LOG.debug(family.toString() + " has " + familyStatus.length + + " files."); return false; } } @@ -761,7 +884,7 @@ public abstract class FSUtils { if (HConstants.HBASE_NON_USER_TABLE_DIRS.contains(p)) { isValid = false; } else { - isValid = this.fs.getFileStatus(p).isDir(); + isValid = this.fs.getFileStatus(p).isDir(); } } catch (IOException e) { e.printStackTrace(); @@ -772,8 +895,9 @@ public abstract class FSUtils { /** * Heuristic to determine whether is safe or not to open a file for append - * Looks both for dfs.support.append and use reflection to search - * for SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush() + * Looks both for dfs.support.append and use reflection to search for + * SequenceFile.Writer.syncFs() or FSDataOutputStream.hflush() + * * @param conf * @return True if append support */ @@ -784,7 +908,7 @@ public abstract class FSUtils { // TODO: The implementation that comes back when we do a createWriter // may not be using SequenceFile so the below is not a definitive test. // Will do for now (hdfs-200). - SequenceFile.Writer.class.getMethod("syncFs", new Class []{}); + SequenceFile.Writer.class.getMethod("syncFs", new Class[] {}); append = true; } catch (SecurityException e) { } catch (NoSuchMethodException e) { @@ -794,7 +918,7 @@ public abstract class FSUtils { if (!append) { // Look for the 0.21, 0.22, new-style append evidence. try { - FSDataOutputStream.class.getMethod("hflush", new Class []{}); + FSDataOutputStream.class.getMethod("hflush", new Class[] {}); append = true; } catch (NoSuchMethodException e) { append = false; @@ -815,8 +939,9 @@ public abstract class FSUtils { } /** - * Recover file lease. Used when a file might be suspect - * to be had been left open by another process. + * Recover file lease. Used when a file might be suspect to be had been left + * open by another process. + * * @param fs FileSystem handle * @param p Path of file to recover lease * @param conf Configuration handle @@ -824,20 +949,21 @@ public abstract class FSUtils { */ public abstract void recoverFileLease(final FileSystem fs, final Path p, Configuration conf) throws IOException; - + /** * @param fs * @param rootdir - * @return All the table directories under rootdir. Ignore non table hbase folders such as - * .logs, .oldlogs, .corrupt, .META., and -ROOT- folders. + * @return All the table directories under rootdir. Ignore non + * table hbase folders such as .logs, .oldlogs, .corrupt, .META., and + * -ROOT- folders. 
* @throws IOException */ public static List getTableDirs(final FileSystem fs, final Path rootdir) - throws IOException { + throws IOException { // presumes any directory under hbase.rootdir is a table - FileStatus [] dirs = fs.listStatus(rootdir, new DirFilter(fs)); + FileStatus[] dirs = fs.listStatus(rootdir, new DirFilter(fs)); List tabledirs = new ArrayList(dirs.length); - for (FileStatus dir: dirs) { + for (FileStatus dir : dirs) { Path p = dir.getPath(); String tableName = p.getName(); if (!HConstants.HBASE_NON_USER_TABLE_DIRS.contains(tableName)) { @@ -847,7 +973,7 @@ public abstract class FSUtils { return tabledirs; } - public static Path getTablePath(Path rootdir, byte [] tableName) { + public static Path getTablePath(Path rootdir, byte[] tableName) { return getTablePath(rootdir, Bytes.toString(tableName)); } @@ -861,38 +987,37 @@ public abstract class FSUtils { * @throws IOException */ public static FileSystem getCurrentFileSystem(Configuration conf) - throws IOException { + throws IOException { return getRootDir(conf).getFileSystem(conf); } - + /** - * Runs through the HBase rootdir and creates a reverse lookup map for - * table StoreFile names to the full Path. - *
+ * Runs through the HBase rootdir and creates a reverse lookup map for table + * StoreFile names to the full Path. <br>
* Example...<br>
- * Key = 3944417774205889744<br>
- * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/3944417774205889744 - * - * @param fs The file system to use. - * @param hbaseRootDir The root directory to scan. + * Key = 3944417774205889744<br>
+ * Value = hdfs://localhost:51169/user/userid/-ROOT-/70236052/info/ + * 3944417774205889744 + * + * @param fs The file system to use. + * @param hbaseRootDir The root directory to scan. * @return Map keyed by StoreFile name with a value of the full Path. * @throws IOException When scanning the directory fails. */ - public static Map getTableStoreFilePathMap( - final FileSystem fs, final Path hbaseRootDir) - throws IOException { + public static Map getTableStoreFilePathMap(final FileSystem fs, + final Path hbaseRootDir) throws IOException { Map map = new HashMap(); - - // if this method looks similar to 'getTableFragmentation' that is because + + // if this method looks similar to 'getTableFragmentation' that is because // it was borrowed from it. - + DirFilter df = new DirFilter(fs); // presumes any directory under hbase.rootdir is a table - FileStatus [] tableDirs = fs.listStatus(hbaseRootDir, df); + FileStatus[] tableDirs = fs.listStatus(hbaseRootDir, df); for (FileStatus tableDir : tableDirs) { - // Skip the .log directory. All others should be tables. Inside a table, - // there are compaction.dir directories to skip. Otherwise, all else - // should be regions. + // Skip the .log directory. All others should be tables. Inside a table, + // there are compaction.dir directories to skip. Otherwise, all else + // should be regions. Path d = tableDir.getPath(); if (d.getName().equals(HConstants.HREGION_LOGDIR_NAME)) { continue; @@ -912,34 +1037,35 @@ public abstract class FSUtils { FileStatus[] familyStatus = fs.listStatus(family); for (FileStatus sfStatus : familyStatus) { Path sf = sfStatus.getPath(); - map.put( sf.getName(), sf); + map.put(sf.getName(), sf); } - + } } } - return map; + return map; } - + /** - * Calls fs.listStatus() and treats FileNotFoundException as non-fatal - * This would accommodate difference in various hadoop versions + * Calls fs.listStatus() and treats FileNotFoundException as non-fatal This + * would accommodate difference in various hadoop versions * * @param fs file system * @param dir directory * @param filter path filter * @return null if tabledir doesn't exist, otherwise FileStatus array */ - public static FileStatus [] listStatus(final FileSystem fs, - final Path dir, final PathFilter filter) throws IOException { - FileStatus [] status = null; + public static FileStatus[] listStatus(final FileSystem fs, final Path dir, + final PathFilter filter) throws IOException { + FileStatus[] status = null; try { status = filter == null ? 
fs.listStatus(dir) : fs.listStatus(dir, filter); } catch (FileNotFoundException fnfe) { // if directory doesn't exist, return null LOG.info(dir + " doesn't exist"); } - if (status == null || status.length < 1) return null; + if (status == null || status.length < 1) + return null; return status; } } diff --git src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index 720841c..e064cc2 100644 --- src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -491,7 +491,7 @@ public class HBaseFsck { long now = System.currentTimeMillis(); Path backupDir = new Path(hbaseDir.getParent(), hbaseDir.getName() + "-" + now); - fs.mkdirs(backupDir); + FSUtils.mkdirs(conf, fs, backupDir); sidelineTable(fs, HConstants.ROOT_TABLE_NAME, hbaseDir, backupDir); try { diff --git src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java index f6d088f..1386900 100644 --- src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java +++ src/main/java/org/apache/hadoop/hbase/util/RegionSplitter.java @@ -434,7 +434,7 @@ public class RegionSplitter { byte[] rawData = new byte[tmpIn.available()]; tmpIn.readFully(rawData); tmpIn.close(); - FSDataOutputStream splitOut = fs.create(splitFile); + FSDataOutputStream splitOut = FSUtils.createFile(conf, fs, splitFile); splitOut.write(rawData); try { @@ -743,7 +743,8 @@ public class RegionSplitter { // prepare the split file Path tmpFile = new Path(tableDir, "_balancedSplit_prepare"); - FSDataOutputStream tmpOut = fs.create(tmpFile); + FSDataOutputStream tmpOut = FSUtils.createFile(table.getConfiguration(), + fs, tmpFile); // calculate all the splits == [daughterRegions] = [(start, splitPoint)] for (Pair r : rows) { diff --git src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java index 30d7fe9..f1744a4 100644 --- src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java +++ src/main/java/org/apache/hadoop/hbase/zookeeper/ZKSplitLog.java @@ -32,12 +32,14 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.master.SplitLogManager; import org.apache.hadoop.hbase.regionserver.SplitLogWorker; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.FSUtils; /** * Common methods and attributes used by {@link SplitLogManager} and @@ -176,11 +178,11 @@ public class ZKSplitLog { return (worker + "_" + ZKSplitLog.encode(file)); } - public static void markCorrupted(Path rootdir, String tmpname, - FileSystem fs) { + public static void markCorrupted(Configuration conf, Path rootdir, + String tmpname, FileSystem fs) { Path file = new Path(getSplitLogDir(rootdir, tmpname), "corrupt"); try { - fs.createNewFile(file); + FSUtils.createFile(conf, fs, file); } catch (IOException e) { LOG.warn("Could not flag a log file as corrupted. 
Failed to create " + file, e); diff --git src/main/resources/hbase-default.xml src/main/resources/hbase-default.xml index 9277e0c..cf41541 100644 --- src/main/resources/hbase-default.xml +++ src/main/resources/hbase-default.xml @@ -869,4 +869,25 @@ value to 0. + + hbase.file.permissions.enable + false + Enable, if true, that file permissions should be assigned + to the files written by the regionserver + + + + hbase.permission.umask.file + 022 + File umask that should be applied to default permissions on + files when hbase.file.permissions.enable is true + + + + hbase.permission.umask.directory + 022 + Directory umask that should be applied to default permissions on + files when hbase.file.permissions.enable is true + + diff --git src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java index 6bc7c32..333ddd5 100644 --- src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java +++ src/test/java/org/apache/hadoop/hbase/TestFSTableDescriptorForceCreation.java @@ -27,7 +27,7 @@ import java.io.IOException; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.util.FSTableDescriptors; -import org.junit.*; +import org.junit.Test; import org.junit.experimental.categories.Category; @Category(SmallTests.class) @@ -43,7 +43,8 @@ public class TestFSTableDescriptorForceCreation { HTableDescriptor htd = new HTableDescriptor(name); assertTrue("Should create new table descriptor", - FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false)); + FSTableDescriptors.createTableDescriptor(UTIL.getConfiguration(), fs, + rootdir, htd, false)); } @Test @@ -57,7 +58,8 @@ public class TestFSTableDescriptorForceCreation { HTableDescriptor htd = new HTableDescriptor(name); htds.add(htd); assertFalse("Should not create new table descriptor", - FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false)); + FSTableDescriptors.createTableDescriptor(UTIL.getConfiguration(), fs, + rootdir, htd, false)); } @Test @@ -67,9 +69,11 @@ public class TestFSTableDescriptorForceCreation { FileSystem fs = FileSystem.get(UTIL.getConfiguration()); Path rootdir = new Path(UTIL.getDataTestDir(), name); HTableDescriptor htd = new HTableDescriptor(name); - FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, false); + FSTableDescriptors.createTableDescriptor(UTIL.getConfiguration(), fs, + rootdir, htd, false); assertTrue("Should create new table descriptor", - FSTableDescriptors.createTableDescriptor(fs, rootdir, htd, true)); + FSTableDescriptors.createTableDescriptor(UTIL.getConfiguration(), fs, + rootdir, htd, true)); } @org.junit.Rule diff --git src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java index a5628b9..d4f9775 100644 --- src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java +++ src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java @@ -35,7 +35,17 @@ import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.KeyValue; +import 
org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException; +import org.apache.hadoop.hbase.Server; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.SmallTests; +import org.apache.hadoop.hbase.TableDescriptors; import org.apache.hadoop.hbase.catalog.CatalogTracker; import org.apache.hadoop.hbase.client.HConnection; import org.apache.hadoop.hbase.client.HConnectionManager; @@ -328,7 +338,7 @@ public class TestCatalogJanitor { // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." + parent.getEncodedName()); FileSystem fs = services.getMasterFileSystem().getFileSystem(); - Path path = ref.write(fs, p); + Path path = ref.write(htu.getConfiguration(), fs, p); assertTrue(fs.exists(path)); assertFalse(janitor.cleanParent(parent, r)); // Remove the reference file and try again. @@ -434,7 +444,8 @@ public class TestCatalogJanitor { // Now play around with the cleanParent function. Create a ref from splita // up to the parent. Path splitaRef = - createReferences(services, htd, parent, splita, Bytes.toBytes("ccc"), false); + createReferences(htu.getConfiguration(), services, htd, + parent, splita, Bytes.toBytes("ccc"), false); // Make sure actual super parent sticks around because splita has a ref. assertFalse(janitor.cleanParent(parent, regions.get(parent))); @@ -449,10 +460,10 @@ public class TestCatalogJanitor { FileSystem fs = FileSystem.get(htu.getConfiguration()); assertTrue(fs.delete(splitaRef, true)); // Create the refs from daughters of splita. - Path splitaaRef = - createReferences(services, htd, splita, splitaa, Bytes.toBytes("bbb"), false); - Path splitabRef = - createReferences(services, htd, splita, splitab, Bytes.toBytes("bbb"), true); + Path splitaaRef = createReferences(htu.getConfiguration(), services, htd, + splita, splitaa, Bytes.toBytes("bbb"), false); + Path splitabRef = createReferences(htu.getConfiguration(), services, htd, + splita, splitab, Bytes.toBytes("bbb"), true); // Test splita. It should stick around because references from splitab, etc. assertFalse(janitor.cleanParent(splita, regions.get(splita))); @@ -489,7 +500,8 @@ public class TestCatalogJanitor { * @return Path to reference we created. * @throws IOException */ - private Path createReferences(final MasterServices services, + private Path createReferences(final Configuration conf, + final MasterServices services, final HTableDescriptor htd, final HRegionInfo parent, final HRegionInfo daughter, final byte [] midkey, final boolean top) throws IOException { @@ -503,7 +515,7 @@ public class TestCatalogJanitor { // Reference name has this format: StoreFile#REF_NAME_PARSER Path p = new Path(storedir, Long.toString(now) + "." 
+ parent.getEncodedName()); FileSystem fs = services.getMasterFileSystem().getFileSystem(); - ref.write(fs, p); + ref.write(conf, fs, p); return p; } diff --git src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java index 5b3b962..a5eeef5 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java @@ -150,7 +150,8 @@ public class TestStoreFile extends HBaseTestCase { kv = KeyValue.createKeyValueFromKey(reader.getLastKey()); byte [] finalRow = kv.getRow(); // Make a reference - Path refPath = StoreFile.split(fs, dir, hsf, midRow, Range.top); + Path refPath = StoreFile.split(new Configuration(false), fs, dir, hsf, + midRow, Range.top); StoreFile refHsf = new StoreFile(this.fs, refPath, conf, cacheConf, StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE); // Now confirm that I can read from the reference and that it only gets @@ -178,14 +179,16 @@ public class TestStoreFile extends HBaseTestCase { if (this.fs.exists(topDir)) { this.fs.delete(topDir, true); } - Path topPath = StoreFile.split(this.fs, topDir, f, midRow, Range.top); + Path topPath = StoreFile.split(new Configuration(false), this.fs, topDir, + f, midRow, Range.top); // Create bottom split. Path bottomDir = Store.getStoreHomedir(this.testDir, "2", Bytes.toBytes(f.getPath().getParent().getName())); if (this.fs.exists(bottomDir)) { this.fs.delete(bottomDir, true); } - Path bottomPath = StoreFile.split(this.fs, bottomDir, + Path bottomPath = StoreFile.split(new Configuration(false), this.fs, + bottomDir, f, midRow, Range.bottom); // Make readers on top and bottom. StoreFile.Reader top = @@ -245,8 +248,10 @@ public class TestStoreFile extends HBaseTestCase { // First, do a key that is < than first key. Ensure splits behave // properly. byte [] badmidkey = Bytes.toBytes(" ."); - topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top); - bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, + topPath = StoreFile.split(new Configuration(false), this.fs, topDir, f, + badmidkey, Range.top); + bottomPath = StoreFile.split(new Configuration(false), this.fs, + bottomDir, f, badmidkey, Range.bottom); top = new StoreFile(this.fs, topPath, conf, cacheConf, StoreFile.BloomType.NONE, @@ -292,8 +297,10 @@ public class TestStoreFile extends HBaseTestCase { // Test when badkey is > than last key in file ('||' > 'zz'). 
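// Editorial note, illustrative only: the StoreFile.split calls in this test
// now receive "new Configuration(false)", a Configuration that loads no
// default resources, so the new hbase.file.permissions.enable flag is
// presumably left at its false default and the split references are written
// with ordinary default permissions. When the flag is on, the umask settings
// are expected to combine with the default permission as testPermMask (later
// in this diff) asserts:
//   FsPermission.getDefault()                          -> 777
//   getDefault().applyUMask(new FsPermission("077"))   -> 700  (file umask)
//   getDefault().applyUMask(new FsPermission("033"))   -> 744  (directory umask)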
badmidkey = Bytes.toBytes("|||"); - topPath = StoreFile.split(this.fs, topDir, f, badmidkey, Range.top); - bottomPath = StoreFile.split(this.fs, bottomDir, f, badmidkey, + topPath = StoreFile.split(new Configuration(false), this.fs, topDir, f, + badmidkey, Range.top); + bottomPath = StoreFile.split(new Configuration(false), this.fs, + bottomDir, f, badmidkey, Range.bottom); top = new StoreFile(this.fs, topPath, conf, cacheConf, StoreFile.BloomType.NONE, diff --git src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java index f1ea701..ed1b1f5 100644 --- src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java +++ src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java @@ -175,7 +175,8 @@ public class TestHLogSplit { new HLog.Entry(new HLogKey(encoded, HConstants.META_TABLE_NAME, 1, now, HConstants.DEFAULT_CLUSTER_ID), new WALEdit()); - Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir, true); + Path p = HLogSplitter.getRegionSplitEditsPath(fs, + TEST_UTIL.getConfiguration(), entry, hbaseDir, true); String parentOfParent = p.getParent().getParent().getName(); assertEquals(parentOfParent, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()); } diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java index 0db4d42..7aed3f7 100644 --- src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java +++ src/test/java/org/apache/hadoop/hbase/util/TestFSTableDescriptors.java @@ -17,7 +17,11 @@ */ package org.apache.hadoop.hbase.util; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import java.io.FileNotFoundException; import java.io.IOException; @@ -28,7 +32,13 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableDescriptors; +import org.apache.hadoop.hbase.TableExistsException; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -62,7 +72,8 @@ public class TestFSTableDescriptors { FileStatus [] statuses = fs.listStatus(testdir); assertTrue("statuses.length="+statuses.length, statuses.length == 1); for (int i = 0; i < 10; i++) { - FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + FSTableDescriptors.updateHTableDescriptor(UTIL.getConfiguration(), fs, + testdir, htd); } statuses = fs.listStatus(testdir); assertTrue(statuses.length == 1); @@ -76,14 +87,17 @@ public class TestFSTableDescriptors { Path testdir = UTIL.getDataTestDir("testSequenceidAdvancesOnTableInfo"); HTableDescriptor htd = new HTableDescriptor("testSequenceidAdvancesOnTableInfo"); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); - Path p0 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + Path p0 = FSTableDescriptors.updateHTableDescriptor( + UTIL.getConfiguration(), fs, testdir, htd); int i0 = 
FSTableDescriptors.getTableInfoSequenceid(p0); - Path p1 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + Path p1 = FSTableDescriptors.updateHTableDescriptor( + UTIL.getConfiguration(), fs, testdir, htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p0)); int i1 = FSTableDescriptors.getTableInfoSequenceid(p1); assertTrue(i1 == i0 + 1); - Path p2 = FSTableDescriptors.updateHTableDescriptor(fs, testdir, htd); + Path p2 = FSTableDescriptors.updateHTableDescriptor( + UTIL.getConfiguration(), fs, testdir, htd); // Assert we cleaned up the old file. assertTrue(!fs.exists(p1)); int i2 = FSTableDescriptors.getTableInfoSequenceid(p2); @@ -183,7 +197,8 @@ public class TestFSTableDescriptors { for (int i = 0; i < count; i++) { HTableDescriptor htd = new HTableDescriptor(name + i); htd.addFamily(new HColumnDescriptor("" + i)); - FSTableDescriptors.updateHTableDescriptor(fs, rootdir, htd); + FSTableDescriptors.updateHTableDescriptor(UTIL.getConfiguration(), fs, + rootdir, htd); } // Wait a while so mod time we write is for sure different. Thread.sleep(100); diff --git src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java index e2611e6..45eb6bb 100644 --- src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java +++ src/test/java/org/apache/hadoop/hbase/util/TestFSUtils.java @@ -19,14 +19,21 @@ */ package org.apache.hadoop.hbase.util; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; -import org.apache.hadoop.hbase.*; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hbase.HBaseConfiguration; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HDFSBlocksDistribution; +import org.apache.hadoop.hbase.MediumTests; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.Test; import org.junit.experimental.categories.Category; @@ -146,6 +153,19 @@ public class TestFSUtils { } + @Test + public void testPermMask() throws Exception { + Configuration conf = HBaseConfiguration.create(); + conf.set(FSUtils.DIRECTORY_PERMISSION_UMASK_KEY, "033"); + conf.set(FSUtils.FILE_PERMISSION_UMASK_KEY, "077"); + conf.setBoolean(HConstants.ENABLE_FILE_PERMISSIONS, true); + + FileSystem fs = FileSystem.get(conf); + FsPermission filePerm = FSUtils.getFilePermissions(fs, conf); + assertEquals(new FsPermission("700"), filePerm); + FsPermission dirPerm = FSUtils.getDirectoryPermissions(fs, conf); + assertEquals(new FsPermission("744"), dirPerm); + } @org.junit.Rule public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =