Details
Type: Bug
Status: In Progress
Priority: Major
Resolution: Unresolved
Affects Version/s: 3.3.2
Fix Version/s: None
Component/s: None
Description
Performing a join using a checkpointed DataFrame leads to an error because the column ids/names in the prepared execution plan are not unique.
This issue can be reproduced with this simple code (fails on 3.3.2, succeeds on 3.1.2):
import spark.implicits._

spark.sparkContext.setCheckpointDir("file:///tmp/cdir")
val df = spark.range(10).toDF("id")
val cdf = df.checkpoint()
cdf.join(df) // org.apache.spark.sql.AnalysisException thrown on 3.3.2
The failure message is:
org.apache.spark.sql.AnalysisException: Failure when resolving conflicting references in Join:
'Join Inner
:- LogicalRDD [id#2L], false
+- Project [id#0L AS id#2L]
   +- Range (0, 10, step=1, splits=Some(16))
Conflicting attributes: id#2L
;
'Join Inner
:- LogicalRDD [id#2L], false
+- Project [id#0L AS id#2L]
   +- Range (0, 10, step=1, splits=Some(16))
  at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.failAnalysis(CheckAnalysis.scala:57)
  at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.failAnalysis$(CheckAnalysis.scala:56)
  at org.apache.spark.sql.catalyst.analysis.Analyzer.failAnalysis(Analyzer.scala:188)
  at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1(CheckAnalysis.scala:540)
  at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.$anonfun$checkAnalysis$1$adapted(CheckAnalysis.scala:102)
  at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:367)
  at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis(CheckAnalysis.scala:102)
  at org.apache.spark.sql.catalyst.analysis.CheckAnalysis.checkAnalysis$(CheckAnalysis.scala:97)
  at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:188)
  at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:214)
  at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
  at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:211)
  at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:76)
  at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
  at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$2(QueryExecution.scala:185)
  at org.apache.spark.sql.execution.QueryExecution$.withInternalError(QueryExecution.scala:510)
  at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:185)
  at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779)
  at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:184)
  at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:76)
  at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:74)
  at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:66)
  at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:91)
  at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779)
  at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
  at org.apache.spark.sql.Dataset.withPlan(Dataset.scala:3887)
  at org.apache.spark.sql.Dataset.join(Dataset.scala:920)
  ... 49 elided
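A possible workaround (a sketch only, not verified against 3.3.2 and not part of the original report): since the failure comes from both join branches sharing the attribute id#2L, re-aliasing the columns of the non-checkpointed side forces the analyzer to assign fresh expression ids. The column name id_right below is a hypothetical choice:

// Workaround sketch, assuming the conflict is the duplicated expression id
// (id#2L) shared by the checkpointed LogicalRDD and the original plan.
// Aliasing creates a new attribute with a fresh expression id, so the two
// join branches no longer overlap.
import org.apache.spark.sql.functions.col

val dfRenamed = df.select(col("id").alias("id_right"))
cdf.join(dfRenamed) // may avoid the conflicting-references failure

This mirrors the usual self-join workaround of renaming one side; it only sidesteps the analysis check and does not address the underlying deduplication bug.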