diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
index e1e45bc..dafc419 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java
@@ -25,8 +25,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.shims.HadoopShims;
+import org.apache.hadoop.hive.shims.ShimLoader;
 
 public class HiveMetaStoreFsImpl implements MetaStoreFS {
 
@@ -37,16 +38,10 @@ public class HiveMetaStoreFsImpl implements MetaStoreFS {
   public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
       Configuration conf) throws MetaException {
     LOG.info("deleting " + f);
-
-    // older versions of Hadoop don't have a Trash constructor based on the
-    // Path or FileSystem. So need to achieve this by creating a dummy conf.
-    // this needs to be filtered out based on version
-    Configuration dupConf = new Configuration(conf);
-    FileSystem.setDefaultUri(dupConf, fs.getUri());
+    HadoopShims hadoopShim = ShimLoader.getHadoopShims();
 
     try {
-      Trash trashTmp = new Trash(dupConf);
-      if (trashTmp.moveToTrash(f)) {
+      if (hadoopShim.moveToAppropriateTrash(fs, f, conf)) {
         LOG.info("Moved to trash: " + f);
         return true;
       }
diff --git a/shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java b/shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
index d695ff5..791ad7e 100644
--- a/shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
+++ b/shims/src/0.20/java/org/apache/hadoop/hive/shims/Hadoop20Shims.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil;
 import org.apache.hadoop.io.Text;
@@ -603,4 +604,17 @@ public class Hadoop20Shims implements HadoopShims {
     // No such functionality in ancient hadoop
     return;
   }
+
+  @Override
+  public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf)
+      throws IOException {
+    // older versions of Hadoop don't have a Trash constructor based on the
+    // Path or FileSystem. So need to achieve this by creating a dummy conf.
+    // this needs to be filtered out based on version
+
+    Configuration dupConf = new Configuration(conf);
+    FileSystem.setDefaultUri(dupConf, fs.getUri());
+    Trash trash = new Trash(dupConf);
+    return trash.moveToTrash(path);
+  }
 }
diff --git a/shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java b/shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
index be8d703..09ef150 100644
--- a/shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
+++ b/shims/src/0.20S/java/org/apache/hadoop/hive/shims/Hadoop20SShims.java
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hive.shims;
 
+import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.hive.shims.HadoopShimsSecure;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapreduce.Job;
@@ -97,6 +99,19 @@ public class Hadoop20SShims extends HadoopShimsSecure {
   }
 
   @Override
+  public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf)
+      throws IOException {
+    // older versions of Hadoop don't have a Trash constructor based on the
+    // Path or FileSystem. So need to achieve this by creating a dummy conf.
+    // this needs to be filtered out based on version
+
+    Configuration dupConf = new Configuration(conf);
+    FileSystem.setDefaultUri(dupConf, fs.getUri());
+    Trash trash = new Trash(dupConf);
+    return trash.moveToTrash(path);
+  }
+
+  @Override
   public long getDefaultBlockSize(FileSystem fs, Path path) {
     return fs.getDefaultBlockSize();
   }
@@ -104,5 +119,5 @@ public class Hadoop20SShims extends HadoopShimsSecure {
   @Override
   public short getDefaultReplication(FileSystem fs, Path path) {
     return fs.getDefaultReplication();
-  } 
+  }
 }
diff --git a/shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
index 6b44e09..f4eb637 100644
--- a/shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
+++ b/shims/src/0.23/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hive.shims;
 
+import java.io.IOException;
 import java.net.MalformedURLException;
 import java.net.URL;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.hive.shims.HadoopShims.JobTrackerState;
 import org.apache.hadoop.mapred.ClusterStatus;
 import org.apache.hadoop.mapred.JobConf;
@@ -123,4 +125,9 @@ public class Hadoop23Shims extends HadoopShimsSecure {
     return fs.getDefaultReplication(path);
   }
 
+  @Override
+  public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf)
+      throws IOException {
+    return Trash.moveToAppropriateTrash(fs, path, conf);
+  }
 }
diff --git a/shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java b/shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
index bf62f08..8d840ca 100644
--- a/shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
+++ b/shims/src/common-secure/java/org/apache/hadoop/hive/shims/HadoopShimsSecure.java
@@ -555,6 +555,10 @@ public abstract class HadoopShimsSecure implements HadoopShims {
   abstract public String getJobLauncherRpcAddress(Configuration conf);
 
   @Override
+  abstract public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf)
+      throws IOException;
+
+  @Override
   abstract public short getDefaultReplication(FileSystem fs, Path path);
 
   @Override
diff --git a/shims/src/common/java/org/apache/hadoop/hive/shims/HadoopShims.java b/shims/src/common/java/org/apache/hadoop/hive/shims/HadoopShims.java
index 1519a6e..019f088 100644
--- a/shims/src/common/java/org/apache/hadoop/hive/shims/HadoopShims.java
+++ b/shims/src/common/java/org/apache/hadoop/hive/shims/HadoopShims.java
@@ -275,7 +275,7 @@
    * @return
    */
   public String getJobLauncherHttpAddress(Configuration conf);
-  
+
   /**
    * Get the default block size for the path. FileSystem alone is not sufficient to
    * determine the same, as in case of CSMT the underlying file system determines that.
@@ -293,11 +293,22 @@
    * @return
    */
   public short getDefaultReplication(FileSystem fs, Path path);
-  /**
-   * InputSplitShim.
-   *
+  /**
+   * Move the directory/file to trash. In case of the symlinks or mount points, the file is
+   * moved to the trashbin in the actual volume of the path p being deleted
+   * @param fs
+   * @param path
+   * @param conf
+   * @return false if the item is already in the trash or trash is disabled
+   * @throws IOException
    */
+  public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf)
+      throws IOException;
+
+  /**
+   * InputSplitShim.
+   *
+   */
   public interface InputSplitShim extends InputSplit {
     JobConf getJob();
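Note (not part of the patch): a minimal sketch of how a caller exercises the new shim entry point, mirroring the HiveMetaStoreFsImpl.deleteDir change above. It assumes the patched shims are on the classpath; the wrapper class and method names below are illustrative, and only ShimLoader.getHadoopShims() and moveToAppropriateTrash() come from this patch.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.shims.HadoopShims;
    import org.apache.hadoop.hive.shims.ShimLoader;

    // Hypothetical caller, shown only to illustrate the shim-based trash API.
    public class TrashMoveExample {
      public static boolean moveToTrash(Path p, Configuration conf) throws IOException {
        FileSystem fs = p.getFileSystem(conf);
        // ShimLoader resolves the Hadoop20/Hadoop20S/Hadoop23 shim matching the Hadoop
        // version on the classpath, so callers no longer build a dummy Configuration
        // or use org.apache.hadoop.fs.Trash directly.
        HadoopShims shims = ShimLoader.getHadoopShims();
        // Per the new HadoopShims javadoc: returns false if trash is disabled or the
        // item is already in the trash.
        return shims.moveToAppropriateTrash(fs, p, conf);
      }
    }

The version-specific behavior stays behind HadoopShims: the 0.20 and 0.20S shims keep the old dummy-Configuration workaround, while the 0.23 shim delegates to Hadoop's Trash.moveToAppropriateTrash, which trashes the item in the actual volume of the path when symlinks or mount points are involved.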