diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
index a8b7ac6..93ed01f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SparkMapJoinResolver.java
@@ -110,6 +110,8 @@ private void moveWork(SparkWork sparkWork, BaseWork work, SparkWork targetWork)
     // Create a new SparkWork for all the small tables of this work
     SparkWork parentWork = new SparkWork(physicalContext.conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
+    // copy cloneToWork to ensure RDD cache still works
+    parentWork.setCloneToWork(sparkWork.getCloneToWork());
 
     // Update dependency graph
     if (!dependencyGraph.containsKey(targetWork)) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
index d2a1c0f..d1ada44 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/SparkWork.java
@@ -58,7 +58,7 @@
   private Map<String, List<String>> requiredCounterPrefix;
 
-  private final Map<BaseWork, BaseWork> cloneToWork;
+  private Map<BaseWork, BaseWork> cloneToWork;
 
   public SparkWork(String name) {
     this.name = name + ":" + (++counter);
@@ -331,4 +331,8 @@ public int compareTo(Dependency o) {
   public Map<BaseWork, BaseWork> getCloneToWork() {
     return cloneToWork;
   }
+
+  public void setCloneToWork(Map<BaseWork, BaseWork> cloneToWork) {
+    this.cloneToWork = cloneToWork;
+  }
 }
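
For context, a minimal self-contained sketch of the pattern this patch enables: when the map-join resolver splits off a new SparkWork for the small tables, the clone-to-original work mapping is copied into the new work, so logic that depends on it (here, RDD caching) can still resolve cloned works back to their originals. The Work and Plan classes below are simplified stand-ins for Hive's BaseWork and SparkWork, not the actual API.

import java.util.HashMap;
import java.util.Map;

// Simplified stand-ins for Hive's BaseWork/SparkWork; illustrative only.
class Work {
  final String name;
  Work(String name) { this.name = name; }
}

class Plan {
  // Maps cloned works back to their originals, like SparkWork.cloneToWork.
  private Map<Work, Work> cloneToWork = new HashMap<>();

  Map<Work, Work> getCloneToWork() { return cloneToWork; }

  // Mirrors the setter the patch adds to SparkWork.
  void setCloneToWork(Map<Work, Work> cloneToWork) { this.cloneToWork = cloneToWork; }
}

public class CloneToWorkDemo {
  public static void main(String[] args) {
    Plan original = new Plan();
    Work base = new Work("map-work");
    Work clone = new Work("map-work-clone");
    original.getCloneToWork().put(clone, base);

    // Without the copy, a plan created during map-join resolution would lose
    // this mapping; propagating it keeps clone resolution (and caching) intact.
    Plan parent = new Plan();
    parent.setCloneToWork(original.getCloneToWork());

    System.out.println(parent.getCloneToWork().get(clone).name); // prints: map-work
  }
}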