diff --git ql/src/java/org/apache/hadoop/hive/ql/Context.java ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 9183edf..ddd5802 100644
--- ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -783,6 +783,13 @@ public TokenRewriteStream getTokenRewriteStream() {
   }
 
   /**
+   * Obtain the unique executionId.
+   */
+  public String getExecutionId() {
+    return executionId;
+  }
+
+  /**
    * Generate a unique executionId. An executionId, together with user name and
    * the configuration, will determine the temporary locations of all intermediate
    * files.
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
index 78c511b..b85d689 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/UpdateDeleteSemanticAnalyzer.java
@@ -286,20 +286,16 @@ private void cleanUpMetaColumnAccessControl() {
   /**
    * Parse the newly generated SQL statment to get a new AST
    */
-  private ReparseResult parseRewrittenQuery(StringBuilder rewrittenQueryStr, String originalQuery) throws SemanticException {
+  private ReparseResult parseRewrittenQuery(StringBuilder rewrittenQueryStr, String executionId, String originalQuery)
+      throws SemanticException {
+    // Set dynamic partitioning to nonstrict so that queries do not need any partition
+    // references.
+    // todo: this may be a perf issue as it prevents the optimizer.. or not
+    HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
     // Parse the rewritten query string
-    Context rewrittenCtx;
-    try {
-      // Set dynamic partitioning to nonstrict so that queries do not need any partition
-      // references.
-      // todo: this may be a perf issue as it prevents the optimizer.. or not
-      HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict");
-      rewrittenCtx = new Context(conf);
-      rewrittenCtx.setExplainConfig(ctx.getExplainConfig());
-      rewrittenCtx.setIsUpdateDeleteMerge(true);
-    } catch (IOException e) {
-      throw new SemanticException(ErrorMsg.UPDATEDELETE_IO_ERROR.getMsg());
-    }
+    Context rewrittenCtx = new Context(conf, executionId);
+    rewrittenCtx.setExplainConfig(ctx.getExplainConfig());
+    rewrittenCtx.setIsUpdateDeleteMerge(true);
     rewrittenCtx.setCmd(rewrittenQueryStr.toString());
 
     ASTNode rewrittenTree;
@@ -402,7 +398,7 @@ private void reparseAndSuperAnalyze(ASTNode tree) throws SemanticException {
     // Add a sort by clause so that the row ids come out in the correct order
     rewrittenQueryStr.append(" sort by ROW__ID ");
 
-    ReparseResult rr = parseRewrittenQuery(rewrittenQueryStr, ctx.getCmd());
+    ReparseResult rr = parseRewrittenQuery(rewrittenQueryStr, ctx.getExecutionId(), ctx.getCmd());
     Context rewrittenCtx = rr.rewrittenCtx;
     ASTNode rewrittenTree = rr.rewrittenTree;
 
@@ -703,7 +699,7 @@ WHEN NOT MATCHED THEN INSERT VALUES(source.a2, source.b2)
     }
     boolean validating = handleCardinalityViolation(rewrittenQueryStr, target, onClauseAsText,
       targetTable, numWhenMatchedDeleteClauses == 0 && numWhenMatchedUpdateClauses == 0);
-    ReparseResult rr = parseRewrittenQuery(rewrittenQueryStr, ctx.getCmd());
+    ReparseResult rr = parseRewrittenQuery(rewrittenQueryStr, ctx.getExecutionId(), ctx.getCmd());
     Context rewrittenCtx = rr.rewrittenCtx;
     ASTNode rewrittenTree = rr.rewrittenTree;
     rewrittenCtx.setOperation(Context.Operation.MERGE);