diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
index 604c8ae..39b6d71 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java
@@ -472,8 +472,7 @@ public void processPartitionPruningSink(GenSparkProcContext context,
     // set up temporary path to communicate between the small/big table
     Path tmpPath = targetWork.getTmpPathForPartitionPruning();
     if (tmpPath == null) {
-      Path baseTmpPath = context.parseContext.getContext().getMRTmpPath();
-      tmpPath = SparkUtilities.generateTmpPathForPartitionPruning(baseTmpPath, targetId);
+      tmpPath = getDPPOutputPath(context.parseContext.getContext());
       targetWork.setTmpPathForPartitionPruning(tmpPath);
       LOG.info("Setting tmp path between source work and target work:\n" + tmpPath);
     }
@@ -509,6 +508,10 @@ public void processPartitionPruningSink(GenSparkProcContext context,
     keys.add(desc.getTargetPartKey());
   }
 
+  private Path getDPPOutputPath(Context context) {
+    return new Path(context.getMRScratchDir(), "_dpp_output_");
+  }
+
   public static SparkEdgeProperty getEdgeProperty(HiveConf conf, ReduceSinkOperator reduceSink,
       ReduceWork reduceWork) throws SemanticException {
     boolean useSparkGroupBy = conf.getBoolVar(HiveConf.ConfVars.SPARK_USE_GROUPBY_SHUFFLE);
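
Note on the change: the removed code built a per-target tmp path from Context.getMRTmpPath() and the targetId, while the new getDPPOutputPath helper returns the same fixed "_dpp_output_" child of the MR scratch directory for every call, so all partition-pruning sinks for a query now share one well-known base directory. The following standalone sketch (not Hive code; the scratch-dir value and the DppOutputPathSketch class are hypothetical) illustrates the path composition using only Hadoop's org.apache.hadoop.fs.Path API:

import org.apache.hadoop.fs.Path;

public class DppOutputPathSketch {

  // Mirrors the logic of the private helper added in this diff:
  // the DPP output path is a fixed "_dpp_output_" child of the scratch dir.
  static Path getDPPOutputPath(Path mrScratchDir) {
    return new Path(mrScratchDir, "_dpp_output_");
  }

  public static void main(String[] args) {
    // Hypothetical value, as Context.getMRScratchDir() might return.
    Path scratchDir = new Path("hdfs://nn:8020/tmp/hive/hive_2024/_tmp_space.db");

    // Every call with the same scratch dir yields the same base path,
    // unlike the old per-targetId paths derived from getMRTmpPath().
    System.out.println(getDPPOutputPath(scratchDir));
    // -> hdfs://nn:8020/tmp/hive/hive_2024/_tmp_space.db/_dpp_output_
  }
}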