diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index cee95fd..62cd9f8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -32,6 +32,7 @@
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.conf.HiveConf;
@@ -228,6 +229,8 @@
 
       // Next we do this for tables and partitions
       LoadTableDesc tbd = work.getLoadTableWork();
+      StringBuilder partPath = new StringBuilder();
+      boolean partRmFlag = false;
       if (tbd != null) {
         StringBuilder mesg = new StringBuilder("Loading data to table ")
             .append( tbd.getTable().getTableName());
@@ -236,6 +239,11 @@
           Map<String, String> partSpec = tbd.getPartitionSpec();
           for (String key: partSpec.keySet()) {
             mesg.append(key).append('=').append(partSpec.get(key)).append(", ");
+            if (partSpec.get(key) == null) {
+              partRmFlag = true;
+            } else {
+              partPath.append(key).append('=').append(partSpec.get(key)).append("/");
+            }
           }
           mesg.setLength(mesg.length()-2);
           mesg.append(')');
@@ -336,7 +344,20 @@
         // deal with dynamic partitions
         DynamicPartitionCtx dpCtx = tbd.getDPCtx();
         if (dpCtx != null && dpCtx.getNumDPCols() > 0) { // dynamic partitions
-
+
+          if (partRmFlag && tbd.getReplace()) {
+            partPath.insert(0, "/");
+            partPath.insert(0, table.getPath());
+            FileSystem fs2 = FileSystem.get(table.getDataLocation(), conf);
+            FileStatus[] dirs2 = fs2.globStatus(new Path(tbd.getSourceDir()));
+            if (fs2.exists(new Path(partPath.toString()))) {
+              FsShell fshell = new FsShell();
+              fshell.setConf(conf);
+              fshell.run(new String[]{"-rmr", partPath.toString()});
+              fshell.run(new String[]{"-mkdir", partPath.toString()});
+            }
+          }
+
           List<LinkedHashMap<String, String>> dps = Utilities.getFullDPSpecs(conf, dpCtx);
 
           // publish DP columns to its subscribers
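
For reference, here is a minimal, self-contained sketch of the FsShell pattern the patch relies on: recursively delete a partition directory, then recreate it empty before the dynamic-partition load. The class name and warehouse path are hypothetical, and the sketch assumes a Hadoop release where the "-rmr" and "-mkdir" shell verbs behave as they do in the patch (newer releases spell these "-rm -r" and "-mkdir -p").

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;

    // Hypothetical standalone illustration of the two FsShell calls above.
    // Unlike the patch, it checks the int result of run() so a failed
    // delete is not silently followed by the mkdir.
    public class PartitionDirReset {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FsShell fshell = new FsShell();
        fshell.setConf(conf);
        String partPath = "/user/hive/warehouse/t/ds=2010-01-01"; // hypothetical
        if (fshell.run(new String[]{"-rmr", partPath}) != 0) {
          System.err.println("rmr failed for " + partPath);
        }
        if (fshell.run(new String[]{"-mkdir", partPath}) != 0) {
          System.err.println("mkdir failed for " + partPath);
        }
      }
    }

Two points worth noting against the patch itself: it ignores the return codes of fshell.run(), and calling fs2.delete(new Path(partPath.toString()), true) followed by fs2.mkdirs(...) on the FileSystem handle it already holds would achieve the same effect without going through the shell layer.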