diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
index 0df30f1ea0..114d455ff8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java
@@ -503,8 +503,11 @@ private DataContainer handleDynParts(Hive db, Table table, LoadTableDesc tbd,
         (tbd.getLbCtx() == null) ? 0 : tbd.getLbCtx().calculateListBucketingLevel(),
         work.getLoadTableWork().getWriteType() != AcidUtils.Operation.NOT_ACID && !tbd.isMmTable(),
-        work.getLoadTableWork().getTxnId(), tbd.getStmtId(), hasFollowingStatsTask(),
-        work.getLoadTableWork().getWriteType());
+        work.getLoadTableWork().getTxnId(),
+        tbd.getStmtId(),
+        hasFollowingStatsTask(),
+        work.getLoadTableWork().getWriteType(),
+        tbd.isInsertOverwrite());

     // publish DP columns to its subscribers
     if (dps != null && dps.size() > 0) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 2e1fd37d4a..0234fba3b8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -4077,7 +4077,7 @@ private static void tryDelete(FileSystem fs, Path path) {
       throws IOException {
     int skipLevels = dpLevels + lbLevels;
     if (filter == null) {
-      filter = new JavaUtils.IdPathFilter(txnId, stmtId, true);
+      filter = new JavaUtils.IdPathFilter(txnId, stmtId, true, false, isBaseDir);
     }
     if (skipLevels == 0) {
       return statusToPath(fs.listStatus(path, filter));
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 23983d85b3..690b26d38f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2178,7 +2178,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
    * @throws HiveException
    */
   private Set<Path> getValidPartitionsInPath(
-      int numDP, int numLB, Path loadPath, Long txnId, int stmtId, boolean isMmTable) throws HiveException {
+      int numDP, int numLB, Path loadPath, Long txnId, int stmtId, boolean isMmTable, boolean isInsertOverwrite) throws HiveException {
     Set<Path> validPartitions = new HashSet<Path>();
     try {
       FileSystem fs = loadPath.getFileSystem(conf);
@@ -2199,7 +2199,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
         // where this is used; we always want to load everything; also the only case where
         // we have multiple statements anyway is union.
         Path[] leafStatus = Utilities.getMmDirectoryCandidates(
-            fs, loadPath, numDP, numLB, null, txnId, -1, conf, false);
+            fs, loadPath, numDP, numLB, null, txnId, -1, conf, isInsertOverwrite);
         for (Path p : leafStatus) {
           Path dpPath = p.getParent(); // Skip the MM directory that we have found.
           for (int i = 0; i < numLB; ++i) {
@@ -2247,7 +2247,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
   public Map<Map<String, String>, Partition> loadDynamicPartitions(final Path loadPath,
       final String tableName, final Map<String, String> partSpec, final LoadFileType loadFileType,
       final int numDP, final int numLB, final boolean isAcid, final long txnId, final int stmtId,
-      final boolean hasFollowingStatsTask, final AcidUtils.Operation operation)
+      final boolean hasFollowingStatsTask, final AcidUtils.Operation operation, boolean isInsertOverwrite)
       throws HiveException {

     final Map<Map<String, String>, Partition> partitionsMap =
@@ -2263,7 +2263,7 @@ private void constructOneLBLocationMap(FileStatus fSta,
     // Get all valid partition paths and existing partitions for them (if any)
     final Table tbl = getTable(tableName);
     final Set<Path> validPartitions = getValidPartitionsInPath(numDP, numLB, loadPath, txnId, stmtId,
-        AcidUtils.isInsertOnlyTable(tbl.getParameters()));
+        AcidUtils.isInsertOnlyTable(tbl.getParameters()), isInsertOverwrite);

     final int partsToLoad = validPartitions.size();
     final AtomicInteger partitionsLoaded = new AtomicInteger(0);
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 83dfb47e1c..963a928649 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -6995,6 +6995,10 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input)
       tableDesc.setWriter(fileSinkDesc);
     }

+    if (fileSinkDesc.getInsertOverwrite()) {
+      ltd.setInsertOverwrite(true);
+    }
+
     if (SessionState.get().isHiveServerQuery() &&
         null != table_desc &&
         table_desc.getSerdeClassName().equalsIgnoreCase(ThriftJDBCBinarySerDe.class.getName()) &&
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
index 1a9c11ec98..28ec9c0342 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java
@@ -40,6 +40,7 @@
   //table specs are to be used
   private int stmtId;
   private Long currentTransactionId;
+  private boolean isInsertOverwrite;

   // TODO: the below seem like they should just be combined into partitionDesc
   private org.apache.hadoop.hive.ql.plan.TableDesc table;
@@ -209,6 +210,10 @@ public void setInheritTableSpecs(boolean inheritTableSpecs) {
     this.inheritTableSpecs = inheritTableSpecs;
   }

+  public boolean isInsertOverwrite() { return this.isInsertOverwrite; }
+
+  public void setInsertOverwrite(boolean v) { this.isInsertOverwrite = v; }
+
   /**
    * @return the lbCtx
    */
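Reviewer note: the hunks above thread a single isInsertOverwrite boolean from the planner (SemanticAnalyzer copies the FileSinkDesc overwrite flag onto the LoadTableDesc) through MoveTask into Hive.loadDynamicPartitions and the micromanaged-directory scan, so that an insert-overwrite load can look for base directories rather than only deltas. The sketch below is a minimal, self-contained illustration of that flow, not Hive's actual classes; the LoadDesc stand-in and the base_/delta_ directory name formats are assumptions made for the example.

```java
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

/**
 * Minimal, self-contained sketch of the flag propagation wired up by this patch.
 * LoadDesc is a stand-in for the new LoadTableDesc field; the base_/delta_
 * directory name formats are assumptions for illustration only.
 */
public class InsertOverwriteFlagSketch {

  /** Stand-in for the field/getter/setter pair added to LoadTableDesc. */
  static class LoadDesc {
    private boolean insertOverwrite;
    boolean isInsertOverwrite() { return insertOverwrite; }
    void setInsertOverwrite(boolean v) { insertOverwrite = v; }
  }

  /**
   * Picks the directories a dynamic-partition load should consider for a given
   * transaction: a base directory when overwriting, delta directories otherwise.
   */
  static List<String> candidateDirs(List<String> dirs, long txnId, boolean isInsertOverwrite) {
    String prefix = (isInsertOverwrite ? "base_" : "delta_") + String.format("%07d", txnId);
    return dirs.stream().filter(d -> d.startsWith(prefix)).collect(Collectors.toList());
  }

  public static void main(String[] args) {
    // What SemanticAnalyzer now does: copy the file sink's overwrite flag onto the load descriptor.
    LoadDesc desc = new LoadDesc();
    desc.setInsertOverwrite(true);

    List<String> dirs = Arrays.asList("delta_0000005_0000005", "delta_0000007_0000007", "base_0000007");
    // An overwrite load for txn 7 picks the base directory...
    System.out.println(candidateDirs(dirs, 7L, desc.isInsertOverwrite())); // [base_0000007]
    // ...while a plain insert for txn 7 picks its delta directory.
    System.out.println(candidateDirs(dirs, 7L, false));                    // [delta_0000007_0000007]
  }
}
```

Carrying the flag on LoadTableDesc, rather than re-deriving it inside MoveTask, keeps the overwrite decision in one place: the planner sets it once and every downstream consumer of the load descriptor reads the same value.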