diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 59b1394..15462ec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -121,6 +121,12 @@ public class HTableDescriptor implements WritableComparable { public static final String READONLY = "READONLY"; private static final ImmutableBytesWritable READONLY_KEY = new ImmutableBytesWritable(Bytes.toBytes(READONLY)); + + public static final String SKIP_ARCHIVE = "SKIP_ARCHIVE"; + private static final ImmutableBytesWritable SKIP_ARCHIVE_KEY = + new ImmutableBytesWritable(Bytes.toBytes(SKIP_ARCHIVE)); + + private static boolean DEFAULT_SKIP_ARCHIVE = false; /** * INTERNAL Used by HBase Shell interface to access this metadata @@ -226,6 +232,7 @@ public class HTableDescriptor implements WritableComparable { String.valueOf(DEFAULT_MEMSTORE_FLUSH_SIZE)); DEFAULT_VALUES.put(DEFERRED_LOG_FLUSH, String.valueOf(DEFAULT_DEFERRED_LOG_FLUSH)); + DEFAULT_VALUES.put(SKIP_ARCHIVE, String.valueOf(DEFAULT_SKIP_ARCHIVE)); DEFAULT_VALUES.put(DURABILITY, DEFAULT_DURABLITY.name()); //use the enum name for (String s : DEFAULT_VALUES.keySet()) { RESERVED_KEYWORDS.add(new ImmutableBytesWritable(Bytes.toBytes(s))); @@ -367,7 +374,7 @@ public class HTableDescriptor implements WritableComparable { } return this.root.booleanValue(); } - + /** * INTERNAL Used to denote if the current table represents * -ROOT- region. This is used internally by the @@ -1372,6 +1379,21 @@ public class HTableDescriptor implements WritableComparable { // MasterFileSystem.java:bootstrap()). return null; } + /** + * Method to enable skip_archive + * @param skip Set to True for skipping the archive. Defaults to false + */ + public void setSkipArchive(boolean skip){ + setValue(SKIP_ARCHIVE_KEY, skip? 
TRUE: FALSE); + } + + /** + * Method to check if skip_archive is enabled + * @return True if skip_archive is enabled, false otherwise + */ + public boolean isSkipArchiveEnabled(){ + return isSomething(SKIP_ARCHIVE_KEY, DEFAULT_SKIP_ARCHIVE); + } /** * @return This instance serialized with pb with pb magic prefix diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java index f9cc60f..16a93da 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.LinkedList; import java.util.List; import org.apache.commons.logging.Log; @@ -32,6 +33,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.master.cleaner.BaseHFileCleanerDelegate; +import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.StoreFile; import org.apache.hadoop.hbase.util.Bytes; @@ -57,6 +61,12 @@ public class HFileArchiver { /** Number of retries in case of fs operation failure */ private static final int DEFAULT_RETRIES_NUMBER = 3; + /** Configuration key for skipping archive and deleting the files directly */ + private static final String HFILE_SKIP_ARCHIVE_CONF_KEY = "hbase.hfile.skip.archive"; + + /** Cleaners to run incase skipArchive is enabled */ + private static List cleanerList = null; + private HFileArchiver() { // hidden ctor since this is just a util } @@ -67,28 +77,31 @@ public class HFileArchiver { * @param conf the configuration to use * 
@param fs the file system object * @param info HRegionInfo for region to be deleted + * @param htd HTableDescriptor object for the table to which this region belongs * @throws IOException */ - public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info) - throws IOException { + public static void archiveRegion(Configuration conf, FileSystem fs, HRegionInfo info, + HTableDescriptor htd) throws IOException { Path rootDir = FSUtils.getRootDir(conf); - archiveRegion(fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()), - HRegion.getRegionDir(rootDir, info)); + archiveRegion(conf,fs, rootDir, FSUtils.getTableDir(rootDir, info.getTable()), + HRegion.getRegionDir(rootDir, info), htd); } /** * Remove an entire region from the table directory via archiving the region's hfiles. + * @param conf Configuration object to check the skipArchive key * @param fs {@link FileSystem} from which to remove the region * @param rootdir {@link Path} to the root directory where hbase files are stored (for building * the archive path) - * @param tableDir {@link Path} to where the table is being stored (for building the archive path) - * @param regionDir {@link Path} to where a region is being stored (for building the archive path) + * @param tableDir {@link Path} to where the table is being stored(for building the archive path) + * @param regionDir {@link Path} to where a region is being stored(for building the archive path) + * @param htd HTableDescriptor object for the underlying table * @return true if the region was sucessfully deleted. false if the filesystem * operations could not complete. 
* @throws IOException if the request cannot be completed */ - public static boolean archiveRegion(FileSystem fs, Path rootdir, Path tableDir, Path regionDir) - throws IOException { + public static boolean archiveRegion(Configuration conf,FileSystem fs, Path rootdir, + Path tableDir, Path regionDir, HTableDescriptor htd) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug("ARCHIVING " + regionDir.toString()); } @@ -134,7 +147,7 @@ public class HFileArchiver { LOG.debug("Archiving " + toArchive); boolean success = false; try { - success = resolveAndArchive(fs, regionArchiveDir, toArchive); + success = resolveAndArchive(conf, fs, regionArchiveDir, toArchive, htd); } catch (IOException e) { LOG.error("Failed to archive " + toArchive, e); success = false; @@ -157,10 +170,11 @@ public class HFileArchiver { * @param parent Parent region hosting the store files * @param tableDir {@link Path} to where the table is being stored (for building the archive path) * @param family the family hosting the store files + * @param htd HTableDescriptor object for the underlying table * @throws IOException if the files could not be correctly disposed. 
*/ public static void archiveFamily(FileSystem fs, Configuration conf, - HRegionInfo parent, Path tableDir, byte[] family) throws IOException { + HRegionInfo parent, Path tableDir, byte[] family, HTableDescriptor htd) throws IOException { Path familyDir = new Path(tableDir, new Path(parent.getEncodedName(), Bytes.toString(family))); FileStatus[] storeFiles = FSUtils.listStatus(fs, familyDir); if (storeFiles == null) { @@ -174,7 +188,7 @@ public class HFileArchiver { Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, parent, tableDir, family); // do the actual archive - if (!resolveAndArchive(fs, storeArchiveDir, toArchive)) { + if (!resolveAndArchive(conf, fs, storeArchiveDir, toArchive, htd)) { throw new IOException("Failed to archive/delete all the files for region:" + Bytes.toString(parent.getRegionName()) + ", family:" + Bytes.toString(family) + " into " + storeArchiveDir + ". Something is probably awry on the filesystem."); @@ -189,15 +203,18 @@ public class HFileArchiver { * @param family the family hosting the store files * @param compactedFiles files to be disposed of. No further reading of these files should be * attempted; otherwise likely to cause an {@link IOException} + * @param htd HTableDescriptor object for the underlying table * @throws IOException if the files could not be correctly disposed. 
*/ public static void archiveStoreFiles(Configuration conf, FileSystem fs, HRegionInfo regionInfo, - Path tableDir, byte[] family, Collection compactedFiles) throws IOException { + Path tableDir, byte[] family, Collection compactedFiles, + HTableDescriptor htd) throws IOException { // sometimes in testing, we don't have rss, so we need to check for that if (fs == null) { - LOG.warn("Passed filesystem is null, so just deleting the files without archiving for region:" - + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family)); + LOG.warn("Passed filesystem is null, so just deleting the files without archiving" + + "for region:" + Bytes.toString(regionInfo.getRegionName()) + ", family:" + + Bytes.toString(family)); deleteStoreFilesWithoutArchiving(compactedFiles); return; } @@ -212,7 +229,8 @@ public class HFileArchiver { if (regionInfo == null || family == null) throw new IOException( "Need to have a region and a family to archive from."); - Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); + Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, + tableDir, family); // make sure we don't archive if we can't and that the archive dir exists if (!fs.mkdirs(storeArchiveDir)) { @@ -228,7 +246,7 @@ public class HFileArchiver { Collection storeFiles = Collections2.transform(compactedFiles, getStorePath); // do the actual archive - if (!resolveAndArchive(fs, storeArchiveDir, storeFiles)) { + if (!resolveAndArchive(conf, fs, storeArchiveDir, storeFiles, htd)) { throw new IOException("Failed to archive/delete all the files for region:" + Bytes.toString(regionInfo.getRegionName()) + ", family:" + Bytes.toString(family) + " into " + storeArchiveDir + ". 
Something is probably awry on the filesystem."); @@ -243,11 +261,13 @@ public class HFileArchiver { * @param tableDir {@link Path} to where the table is being stored (for building the archive path) * @param family the family hosting the store files * @param storeFile file to be archived + * @param htd HTableDescriptor object for the underlying table * @throws IOException if the files could not be correctly disposed. */ public static void archiveStoreFile(Configuration conf, FileSystem fs, HRegionInfo regionInfo, - Path tableDir, byte[] family, Path storeFile) throws IOException { - Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, tableDir, family); + Path tableDir, byte[] family, Path storeFile, HTableDescriptor htd) throws IOException { + Path storeArchiveDir = HFileArchiveUtil.getStoreArchivePath(conf, regionInfo, + tableDir, family); // make sure we don't archive if we can't and that the archive dir exists if (!fs.mkdirs(storeArchiveDir)) { throw new IOException("Could not make archive directory (" + storeArchiveDir + ") for store:" @@ -257,7 +277,7 @@ public class HFileArchiver { // do the actual archive long start = EnvironmentEdgeManager.currentTimeMillis(); File file = new FileablePath(fs, storeFile); - if (!resolveAndArchiveFile(storeArchiveDir, file, Long.toString(start))) { + if (!resolveAndArchiveFile(conf,storeArchiveDir, file, Long.toString(start), htd)) { throw new IOException("Failed to archive/delete the file for region:" + regionInfo.getRegionNameAsString() + ", family:" + Bytes.toString(family) + " into " + storeArchiveDir + ". Something is probably awry on the filesystem."); @@ -271,24 +291,26 @@ public class HFileArchiver { * If any of the passed files to archive are directories, archives all the files under that * directory. Archive directory structure for children is the base archive directory name + the * parent directory and is built recursively is passed files are directories themselves. 
+ * @param conf {@link Configuration} to examine archive properties * @param fs {@link FileSystem} on which to archive the files * @param baseArchiveDir base archive directory to archive the given files * @param toArchive files to be archived + * @param htd HTableDescriptor for the underlying table * @return true on success, false otherwise * @throws IOException on unexpected failure */ - private static boolean resolveAndArchive(FileSystem fs, Path baseArchiveDir, - Collection toArchive) throws IOException { + private static boolean resolveAndArchive(Configuration conf, FileSystem fs, Path baseArchiveDir, + Collection toArchive, HTableDescriptor htd) throws IOException { if (LOG.isTraceEnabled()) LOG.trace("Starting to archive " + toArchive); long start = EnvironmentEdgeManager.currentTimeMillis(); - List failures = resolveAndArchive(fs, baseArchiveDir, toArchive, start); + List failures = resolveAndArchive(conf, fs, baseArchiveDir, toArchive, start, htd); // notify that some files were not archived. // We can't delete the files otherwise snapshots or other backup system // that relies on the archiver end up with data loss. if (failures.size() > 0) { LOG.warn("Failed to complete archive of: " + failures + - ". Those files are still in the original location, and they may slow down reads."); + ". Those files are still in the original location, and they may slow down reads."); return false; } return true; @@ -297,6 +319,7 @@ public class HFileArchiver { /** * Resolve any conflict with an existing archive file via timestamp-append * renaming of the existing file and then archive the passed in files. + * @param conf {@link Configuration} to examine to examine archive settings * @param fs {@link FileSystem} on which to archive the files * @param baseArchiveDir base archive directory to store the files. 
If any of * the files to archive are directories, will append the name of the @@ -305,11 +328,13 @@ public class HFileArchiver { * @param toArchive files/directories that need to be archvied * @param start time the archiving started - used for resolving archive * conflicts. + * @param htd HTableDescriptor for the underlying table * @return the list of failed to archive files. * @throws IOException if an unexpected file operation exception occured */ - private static List resolveAndArchive(FileSystem fs, Path baseArchiveDir, - Collection toArchive, long start) throws IOException { + private static List resolveAndArchive(Configuration conf, FileSystem fs, + Path baseArchiveDir, Collection toArchive, long start, HTableDescriptor htd) + throws IOException { // short circuit if no files to move if (toArchive.size() == 0) return Collections.emptyList(); @@ -332,7 +357,7 @@ public class HFileArchiver { if (LOG.isTraceEnabled()) LOG.trace("Archiving: " + file); if (file.isFile()) { // attempt to archive the file - if (!resolveAndArchiveFile(baseArchiveDir, file, startTime)) { + if (!resolveAndArchiveFile(conf, baseArchiveDir, file, startTime, htd)) { LOG.warn("Couldn't archive " + file + " into backup directory: " + baseArchiveDir); failures.add(file); } @@ -344,7 +369,7 @@ public class HFileArchiver { // and then get all the files from that directory and attempt to // archive those too Collection children = file.getChildren(); - failures.addAll(resolveAndArchive(fs, parentArchiveDir, children, start)); + failures.addAll(resolveAndArchive(conf, fs, parentArchiveDir, children, start, htd)); } } catch (IOException e) { LOG.warn("Failed to archive " + file, e); @@ -359,19 +384,39 @@ public class HFileArchiver { *

* If the same file already exists in the archive, it is moved to a timestamped directory under the archive directory and the new file is put in its place. + * @param conf {@link Configuration} to examine archive settings * @param archiveDir {@link Path} to the directory that stores the archives of the hfiles * @param currentFile {@link Path} to the original HFile that will be archived * @param archiveStartTime time the archiving started, to resolve naming conflicts + * @param htd HTableDescriptor for the underlying table * @return true if the file is successfully archived. false if there was a * problem, but the operation still completed. * @throws IOException on failure to complete {@link FileSystem} operations. */ - private static boolean resolveAndArchiveFile(Path archiveDir, File currentFile, - String archiveStartTime) throws IOException { + private static boolean resolveAndArchiveFile(Configuration conf, Path archiveDir, + File currentFile, String archiveStartTime, HTableDescriptor htd) throws IOException { + + FileSystem fs = currentFile.getFileSystem(); + if (conf.getBoolean(HFILE_SKIP_ARCHIVE_CONF_KEY, false) || ((htd != null) + && htd.isSkipArchiveEnabled())) { + initCleaners(conf); + FileStatus currentStatus = fs.getFileStatus(currentFile.getPath()); + if (runCleaners(currentStatus)){ + LOG.debug("Attempting to delete file without archiving :" + +currentFile.getPath().getName()); + // Attempt to delete the file, if successful return, + // else let the archiver archive it + if (fs.delete(currentFile.getPath(), false)){ + return true; + } else { + LOG.error("Attempt to delete file "+currentFile.getPath().getName()+" failed. " + + "Moving it to the archive."); + } + } + } // build path as it should be in the archive String filename = currentFile.getName(); Path archiveFile = new Path(archiveDir, filename); - FileSystem fs = currentFile.getFileSystem(); // if the file already exists in the archive, move that one to a timestamped backup.
This is a // really, really unlikely situtation, where we get the same name for the existing file, but @@ -399,7 +444,7 @@ public class HFileArchiver { if (LOG.isTraceEnabled()) { LOG.trace("No existing file in archive for: " + archiveFile + - ", free to archive original file."); + ", free to archive original file."); } // at this point, we should have a free spot for the archive file @@ -440,6 +485,40 @@ return true; } + private static boolean runCleaners(FileStatus currentStatus) { + // Run each cleaner against the filestat and return true + // if all of them pass + for(BaseHFileCleanerDelegate cleaner : cleanerList){ + if (!cleaner.isFileDeletable(currentStatus)){ + LOG.debug("File "+currentStatus.getPath().getName()+ + " is not deletable according to cleaner "+cleaner); + return false; + } + } + return true; + } + + /** + * Utility method to instantiate cleaners if null + */ + private static synchronized void initCleaners(Configuration conf) { + if (cleanerList == null){ + cleanerList = new LinkedList<BaseHFileCleanerDelegate>(); + String[] logCleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, + new String[0]); + for (String cleanerName : logCleaners){ + try{ + Class<? extends BaseHFileCleanerDelegate> c = Class.forName(cleanerName).asSubclass( + BaseHFileCleanerDelegate.class); + BaseHFileCleanerDelegate cleaner = c.newInstance(); + cleaner.setConf(conf); + cleanerList.add(cleaner); + } catch(Exception e){ + LOG.warn("Cannot create cleaner instance "+cleanerName, e); + } + } + } + } + /** * Without regard for backup, delete a region. Should be used with caution. * @param regionDir {@link Path} to the region to be deleted.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java index 59bc01e..9f892e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java @@ -197,8 +197,10 @@ public class CatalogJanitor extends Chore { LOG.debug("Deleting region " + regionA.getRegionNameAsString() + " and " + regionB.getRegionNameAsString() + " from fs because merged region no longer holds references"); - HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA); - HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB); + HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionA, + getTableDescriptor(regionA.getTable())); + HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, regionB, + getTableDescriptor(regionB.getTable())); MetaEditor.deleteMergeQualifiers(server.getCatalogTracker(), mergedRegion); return true; } @@ -330,7 +332,8 @@ public class CatalogJanitor extends Chore { " because daughter splits no longer hold references"); FileSystem fs = this.services.getMasterFileSystem().getFileSystem(); if (LOG.isTraceEnabled()) LOG.trace("Archiving parent region: " + parent); - HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent); + HFileArchiver.archiveRegion(this.services.getConfiguration(), fs, parent, + getTableDescriptor(parent.getTable())); MetaEditor.deleteRegion(this.server.getCatalogTracker(), parent); result = true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java index 59e93ad..d8f62a9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java +++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java @@ -496,12 +496,16 @@ public class MasterFileSystem { private void checkTempDir(final Path tmpdir, final Configuration c, final FileSystem fs) throws IOException { // If the temp directory exists, clear the content (left over, from the previous run) + HTableDescriptor htd; if (fs.exists(tmpdir)) { // Archive table in temp, maybe left over from failed deletion, // if not the cleaner will take care of them. for (Path tabledir: FSUtils.getTableDirs(fs, tmpdir)) { + // Attempt to get the HTableDescriptor object if it exists. + // Incase it returns null, its taken care of in archiver + htd = this.services.getTableDescriptors().get(FSUtils.getTableName(tabledir)); for (Path regiondir: FSUtils.getRegionDirs(fs, tabledir)) { - HFileArchiver.archiveRegion(fs, this.rootdir, tabledir, regiondir); + HFileArchiver.archiveRegion(c,fs, this.rootdir, tabledir, regiondir, htd); } } if (!fs.delete(tmpdir, true)) { @@ -551,7 +555,8 @@ public class MasterFileSystem { public void deleteRegion(HRegionInfo region) throws IOException { - HFileArchiver.archiveRegion(conf, fs, region); + HFileArchiver.archiveRegion(conf, fs, region, + this.services.getTableDescriptors().get(region.getTable())); } public void deleteTable(TableName tableName) throws IOException { @@ -590,7 +595,8 @@ public class MasterFileSystem { throws IOException { // archive family store files Path tableDir = FSUtils.getTableDir(rootdir, region.getTable()); - HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName); + HTableDescriptor htd = this.services.getTableDescriptors().get(region.getTable()); + HFileArchiver.archiveFamily(fs, conf, region, tableDir, familyName, htd); // delete the family folder Path familyDir = new Path(tableDir, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java index c6955d0..946a5b2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseFileCleanerDelegate.java @@ -44,6 +44,5 @@ implements FileCleanerDelegate { * @param fStat file status of the file to check * @return true if the file is deletable, false if not */ - protected abstract boolean isFileDeletable(FileStatus fStat); - + public abstract boolean isFileDeletable(FileStatus fStat); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java index 23c45b8..5ec6a38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java @@ -27,6 +27,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.Server; @@ -93,6 +94,8 @@ public class DeleteTableHandler extends TableEventHandler { region.getRegionNameAsString() + " in transitions"); } } + // Get the HTableDesc object for the table + HTableDescriptor htd = ((HMaster)this.server).getTableDescriptors().get(tableName); // 2. Remove regions from META LOG.debug("Deleting regions from META"); @@ -102,13 +105,15 @@ public class DeleteTableHandler extends TableEventHandler { MasterFileSystem mfs = this.masterServices.getMasterFileSystem(); Path tempTableDir = mfs.moveTableToTemp(tableName); + + try { // 4. 
Delete regions from FS (temp directory) FileSystem fs = mfs.getFileSystem(); for (HRegionInfo hri: regions) { LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS"); - HFileArchiver.archiveRegion(fs, mfs.getRootDir(), - tempTableDir, new Path(tempTableDir, hri.getEncodedName())); + HFileArchiver.archiveRegion(server.getConfiguration(),fs, mfs.getRootDir(), + tempTableDir, new Path(tempTableDir, hri.getEncodedName()), htd); } // 5. Delete table from FS (temp directory) diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java index c2c2813..5695ea7 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java @@ -4644,9 +4644,9 @@ public class HRegion implements HeapSize { // , Writable{ } // Archiving the 'A' region - HFileArchiver.archiveRegion(a.getBaseConf(), fs, a.getRegionInfo()); + HFileArchiver.archiveRegion(a.getBaseConf(), fs, a.getRegionInfo(), a.getTableDesc()); // Archiving the 'B' region - HFileArchiver.archiveRegion(b.getBaseConf(), fs, b.getRegionInfo()); + HFileArchiver.archiveRegion(b.getBaseConf(), fs, b.getRegionInfo(), b.getTableDesc()); LOG.info("merge completed. 
New region is " + dstRegion); return dstRegion; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java index 172e408..ebb5ec0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.backup.HFileArchiver; +import org.apache.hadoop.hbase.client.HBaseAdmin; import org.apache.hadoop.hbase.fs.HFileSystem; import org.apache.hadoop.hbase.io.Reference; import org.apache.hadoop.hbase.util.Bytes; @@ -245,12 +246,13 @@ public class HRegionFileSystem { /** * Remove the region family from disk, archiving the store files. + * @param htd HTableDescriptor for the underlying table. 
Used to read table specific overrides * @param familyName Column Family Name * @throws IOException if an error occours during the archiving */ - public void deleteFamily(final String familyName) throws IOException { + public void deleteFamily(final HTableDescriptor htd, final String familyName) throws IOException { // archive family store files - HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName)); + HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, Bytes.toBytes(familyName), htd); // delete the family folder Path familyDir = getStoreDir(familyName); @@ -365,10 +367,10 @@ public class HRegionFileSystem { * @param filePath {@link Path} to the store file to remove * @throws IOException if the archiving fails */ - public void removeStoreFile(final String familyName, final Path filePath) - throws IOException { + public void removeStoreFile(final HTableDescriptor htd, final String familyName, + final Path filePath) throws IOException { HFileArchiver.archiveStoreFile(this.conf, this.fs, this.regionInfo, - this.tableDir, Bytes.toBytes(familyName), filePath); + this.tableDir, Bytes.toBytes(familyName), filePath, htd); } /** @@ -377,10 +379,10 @@ public class HRegionFileSystem { * @param storeFiles set of store files to remove * @throws IOException if the archiving fails */ - public void removeStoreFiles(final String familyName, final Collection storeFiles) - throws IOException { + public void removeStoreFiles(final HTableDescriptor htd, final String familyName, + final Collection storeFiles) throws IOException { HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfo, - this.tableDir, Bytes.toBytes(familyName), storeFiles); + this.tableDir, Bytes.toBytes(familyName), storeFiles, htd); } /** @@ -870,10 +872,13 @@ public class HRegionFileSystem { * @param fs {@link FileSystem} from which to remove the region * @param tableDir {@link Path} to where the table is being stored * @param regionInfo {@link HRegionInfo} for 
region to be deleted + * @param htd HTableDescriptor for the underlying table. Null is handled properly + * if passed with tests * @throws IOException if the request cannot be completed */ public static void deleteRegionFromFileSystem(final Configuration conf, - final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo) throws IOException { + final FileSystem fs, final Path tableDir, final HRegionInfo regionInfo, + final HTableDescriptor htd) throws IOException { HRegionFileSystem regionFs = new HRegionFileSystem(conf, fs, tableDir, regionInfo); Path regionDir = regionFs.getRegionDir(); @@ -888,7 +893,7 @@ public class HRegionFileSystem { // Archive region Path rootDir = FSUtils.getRootDir(conf); - HFileArchiver.archiveRegion(fs, rootDir, tableDir, regionDir); + HFileArchiver.archiveRegion(conf,fs, rootDir, tableDir, regionDir, htd); // Delete empty region dir if (!fs.delete(regionDir, true)) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java index 67f66a7..71381e6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java @@ -1492,7 +1492,8 @@ public class HStore implements Store { for (StoreFile compactedFile : compactedFiles) { compactedFile.closeReader(true); } - this.fs.removeStoreFiles(this.getColumnFamilyName(), compactedFiles); + this.fs.removeStoreFiles(this.getHRegion().getTableDesc(), this.getColumnFamilyName(), + compactedFiles); } catch (IOException e) { e = RemoteExceptionHandler.checkIOException(e); LOG.error("Failed removing compacted files in " + this + diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java index 83dbaa1..0190c53 100644 --- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java @@ -349,7 +349,7 @@ public class RestoreSnapshotHelper { private void removeHdfsRegions(final List regions) throws IOException { if (regions != null && regions.size() > 0) { for (HRegionInfo hri: regions) { - HFileArchiver.archiveRegion(conf, fs, hri); + HFileArchiver.archiveRegion(conf, fs, hri, tableDesc); } } } @@ -395,7 +395,7 @@ public class RestoreSnapshotHelper { Path hfile = new Path(familyDir, hfileName); LOG.trace("Removing hfile=" + hfile + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); - HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile); + HFileArchiver.archiveStoreFile(conf, fs, regionInfo, tableDir, family, hfile, tableDesc); } // Restore Missing files @@ -408,7 +408,7 @@ public class RestoreSnapshotHelper { // Family doesn't exists in the snapshot LOG.trace("Removing family=" + Bytes.toString(family) + " from region=" + regionInfo.getEncodedName() + " table=" + tableName); - HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, family); + HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, family, tableDesc); fs.delete(familyDir, true); } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java index b5cfa9a..206f570 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/backup/TestHFileArchiving.java @@ -35,10 +35,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.MediumTests; import 
org.apache.hadoop.hbase.Stoppable; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; import org.apache.hadoop.hbase.master.cleaner.HFileCleaner; import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy; import org.apache.hadoop.hbase.regionserver.HRegion; @@ -112,6 +114,58 @@ public class TestHFileArchiving { } @Test + public void testSkipArchiveOnCompact() throws Exception { + TableName TEST_TABLE = TableName.valueOf("test_skiparchive_compact"); + HTableDescriptor htd = new HTableDescriptor(TEST_TABLE); + int beforeCount, afterCount; + // Table with a single column + byte [][] COL_FAMS = Bytes.toByteArrays(TEST_FAM); + // set the skiparchive flag + htd.setSkipArchive(true); + UTIL.createTable(htd, COL_FAMS, UTIL.getConfiguration()); + + final HTable t = new HTable(UTIL.getConfiguration(), TEST_TABLE); + // Load the table with some test data + UTIL.loadTable(t, TEST_FAM); + // Flush table to ensure storefiles are written + UTIL.flush(TEST_TABLE); + // Load data again, this will create another version of same rows + UTIL.loadTable(t, TEST_FAM); + // Flush table again to make sure another(set of) store file(s) is(are) written + UTIL.flush(TEST_TABLE); + + // Make sure skipArchive is set properly + assertTrue(t.getTableDescriptor().isSkipArchiveEnabled()); + List servingRegions = UTIL.getHBaseCluster().getRegions(TEST_TABLE); + // make sure we only have 1 region serving this table + assertEquals(1, servingRegions.size()); + HRegion region = servingRegions.get(0); + // save the count of rows + beforeCount = UTIL.countRows(t); + + Path tableDir = region.getRegionFileSystem().getTableDir(); + Path regionDir = HRegion.getRegionDir(tableDir, region.getRegionInfo().getEncodedName()); + Path storeDir = new Path(regionDir, Bytes.toString(TEST_FAM)); + + // Get the list of store files and make sure its greater than 1, to test if compaction works + FileStatus [] fst = 
UTIL.getTestFileSystem().listStatus(storeDir); + assertTrue(fst.length > 1); + + // Do a major compaction on the table + UTIL.compact(TEST_TABLE, true); + afterCount = UTIL.countRows(t); + + // Stat the region's store dir again to make sure there is only 1 store file + // and compaction worked correctly + fst = UTIL.getTestFileSystem().listStatus(storeDir); + assertTrue(fst.length == 1); + + // assert the row counts before and after to be the same to make sure there is no data loss + assertTrue(beforeCount == afterCount); + t.close(); + } + + @Test public void testRemovesRegionDirOnArchive() throws Exception { TableName TABLE_NAME = TableName.valueOf("testRemovesRegionDirOnArchive"); @@ -137,7 +191,8 @@ public class TestHFileArchiving { Path rootDir = region.getRegionFileSystem().getTableDir().getParent(); Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo()); - HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo()); + HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo(), + region.getTableDesc()); // check for the existence of the archive directory and some files in it Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region); @@ -203,7 +258,8 @@ public class TestHFileArchiving { } // then archive the region - HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo()); + HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo(), + region.getTableDesc()); // and check to make sure the region directoy got deleted assertFalse("Region directory (" + regionDir + "), still exists.", fs.exists(regionDir)); @@ -372,8 +428,10 @@ public class TestHFileArchiving { try { // Try to archive the file - HFileArchiver.archiveRegion(fs, rootDir, - sourceRegionDir.getParent(), sourceRegionDir); + // Passing a 'null' HTableDescriptor as per the API change in HBASE-8963 + // It won't affect the tests in anyway and the null is handled in 
the archiver + HFileArchiver.archiveRegion(UTIL.getConfiguration(),fs, rootDir, + sourceRegionDir.getParent(), sourceRegionDir, null); // The archiver succeded, the file is no longer in the original location // but it's in the archive location. diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestSkipArchiveTableDeleteHandler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestSkipArchiveTableDeleteHandler.java new file mode 100644 index 0000000..981c9bb --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/handler/TestSkipArchiveTableDeleteHandler.java @@ -0,0 +1,178 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hbase.master.handler; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; + +import org.apache.hadoop.hbase.MediumTests; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.HBaseTestingUtility; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.HBaseAdmin; +import org.apache.hadoop.hbase.client.HTable; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.util.Bytes; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; + + +@Category(MediumTests.class) +public class TestSkipArchiveTableDeleteHandler { + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); + private static final TableName TABLENAME1 = + TableName.valueOf("test_table_skiparchive1"); + private static final TableName TABLENAME2 = + TableName.valueOf("test_table_skiparchive2"); + private static final TableName TABLENAME3 = + TableName.valueOf("test_table_skiparchive3"); + private static final byte[][] FAMILIES = new byte[][] { Bytes.toBytes("cf1"), + Bytes.toBytes("cf2"), Bytes.toBytes("cf3") }; + private static HTable t1,t2,t3; + + @BeforeClass + public static void beforeAllTests() throws Exception { + + // Set the skiparchive setting in conf + TEST_UTIL.getConfiguration().setBoolean("hbase.hfile.skip.archive", true); + TEST_UTIL.startMiniCluster(); + + // Create a tables of three families. This will assign a region. 
+ TEST_UTIL.createTable(TABLENAME1, FAMILIES); + TEST_UTIL.createTable(TABLENAME2, FAMILIES); + TEST_UTIL.createTable(TABLENAME3, FAMILIES); + + // Build the HTable objects for the 3 tables + t1 = new HTable(TEST_UTIL.getConfiguration(), TABLENAME1); + t2 = new HTable(TEST_UTIL.getConfiguration(), TABLENAME2); + t3 = new HTable(TEST_UTIL.getConfiguration(), TABLENAME3); + + // Create multiple regions in all the three column families + while (TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager() + .getRegionStates().getRegionsInTransition().size() > 0) { + Thread.sleep(100); + } + // Load the table with data for all families + TEST_UTIL.loadTable(t1, FAMILIES); + TEST_UTIL.loadTable(t2, FAMILIES); + TEST_UTIL.loadTable(t3, FAMILIES); + TEST_UTIL.flush(); + + t1.close(); + t2.close(); + t3.close(); + } + + @AfterClass + public static void afterAllTests() throws Exception { + TEST_UTIL.shutdownMiniCluster(); + } + + @Before + public void setup() throws IOException, InterruptedException { + TEST_UTIL.enableDebug(org.apache.hadoop.hbase.master.handler.DeleteTableHandler.class); + } + + @Test + public void deleteTestWithSnapshot() throws Exception { + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + int beforeCount,afterCount,restoreCount; + // Take a snapshot of the table + byte[] snapshot = Bytes.toBytes(TABLENAME1.toString()+".snapshot"); + admin.snapshot(snapshot, Bytes.toBytes(TABLENAME1.toString())); + beforeCount = TEST_UTIL.countRows(t1); + // Delete a few rows from table, for this test we delete all the rows starting with 'a'. 
+ t1.setAutoFlush(false); + for (byte[] row : HBaseTestingUtility.ROWS) { + if (row[0] == 'a'){ + Delete delete = new Delete(row); + t1.delete(delete); + } else{ + break; + } + } + t1.flushCommits(); + afterCount = TEST_UTIL.countRows(t1); + assertFalse(beforeCount == afterCount); + // Delete the table, since skiparchive is set unnecessary hfiles created + // due to deletes are deleted + TEST_UTIL.deleteTable(TABLENAME1); + // Restore the snapshot + admin.restoreSnapshot(snapshot); + // count the rows in the new table + restoreCount = TEST_UTIL.countRows(t1); + assertTrue(beforeCount == restoreCount); + } + + @Test + public void deleteTestNormalTable() throws Exception { + + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + //Delete the table from HBase + TEST_UTIL.deleteTable(TABLENAME2); + // Make sure the table is deleted properly + TableName[] tables = admin.listTableNames(); + for (TableName table:tables){ + if (table.getNameAsString().equals(TABLENAME2.getNameAsString())){ + assert(false); + } + } + } + + @Test + public void deleteTestWithHFileLinks() throws Exception { + + HBaseAdmin admin = TEST_UTIL.getHBaseAdmin(); + byte[] snapshot = Bytes.toBytes(TABLENAME3.toString()+".snapshot"); + int beforeCount,afterCount,cloneCount; + admin.snapshot(snapshot, Bytes.toBytes(TABLENAME3.toString())); + beforeCount = TEST_UTIL.countRows(t3); + // Clone the snapshot to a new table and the new table has HFileLinks to the original table; + admin.cloneSnapshot(snapshot,Bytes.toBytes(TABLENAME3.toString()+".clone")); + // Delete the snapshot + admin.deleteSnapshot(snapshot); + // Delete a few rows from the original table, for this test we + // delete all the rows starting with 'a'. 
+ t3.setAutoFlush(false); + for (byte[] row : HBaseTestingUtility.ROWS) { + if (row[0] == 'a'){ + Delete delete = new Delete(row); + t3.delete(delete); + } else { + break; + } + } + t3.flushCommits(); + afterCount = TEST_UTIL.countRows(t3); + assertFalse(beforeCount == afterCount); + // Delete the original table, this should delete a few + // HFiles that were added due to deletes + TEST_UTIL.deleteTable(TABLENAME3); // count the number of rows in the clone post delete + cloneCount = TEST_UTIL.countRows(new HTable(TEST_UTIL.getConfiguration(), + Bytes.toBytes(TABLENAME3.toString()+".clone"))); + assert(cloneCount==beforeCount); + } + +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java index 0744361..1e0e0eb 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/migration/TestNamespaceUpgrade.java @@ -344,7 +344,7 @@ public class TestNamespaceUpgrade { region.close(); // Delete the region HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, - FSUtils.getTableDir(rootDir, hriAcl.getTable()), hriAcl); + FSUtils.getTableDir(rootDir, hriAcl.getTable()), hriAcl, aclTable); } } } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java index 90ee7aa..48864f1 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionFileSystem.java @@ -77,7 +77,7 @@ public class TestHRegionFileSystem { // Delete the region HRegionFileSystem.deleteRegionFromFileSystem(conf, fs, - FSUtils.getTableDir(rootDir, hri.getTable()), hri); + FSUtils.getTableDir(rootDir, hri.getTable()), hri, null); 
assertFalse("The region folder should be removed", fs.exists(regionDir)); fs.delete(rootDir, true); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java index c6b0ac4..633fed6 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALReplay.java @@ -532,7 +532,7 @@ public class TestWALReplay { for (HColumnDescriptor hcd: htd.getFamilies()) { cf_count++; if (cf_count == 2) { - region.getRegionFileSystem().deleteFamily(hcd.getNameAsString()); + region.getRegionFileSystem().deleteFamily(htd, hcd.getNameAsString()); } } diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb index 0e517fb..8bbfb0d 100644 --- a/hbase-shell/src/main/ruby/hbase/admin.rb +++ b/hbase-shell/src/main/ruby/hbase/admin.rb @@ -474,6 +474,7 @@ module Hbase htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE] htd.setAsyncLogFlush(JBoolean.valueOf(arg.delete(DEFERRED_LOG_FLUSH))) if arg[DEFERRED_LOG_FLUSH] htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY] + htd.setSkipArchive(JBoolean.valueOf(arg.delete(SKIP_ARCHIVE))) if arg[SKIP_ARCHIVE] set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA] set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]