diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
index 2f1497a..154aa1a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketingSortingReduceSinkOptimizer.java
@@ -390,7 +390,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         }
       }
 
-      Table destTable = pGraphContext.getFsopToTable().get(fsOp);
+      Table destTable = fsOp.getConf().getTable();
       if (destTable == null) {
         return null;
       }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
index de20302..e16ba6c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java
@@ -143,7 +143,7 @@ public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
         return null;
       }
 
-      Table destTable = parseCtx.getFsopToTable().get(fsOp);
+      Table destTable = fsOp.getConf().getTable();
       if (destTable == null) {
        LOG.debug("Bailing out of sort dynamic partition optimization as destination table is null");
         return null;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 8215c26..2faaca7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -31,7 +31,6 @@
 import org.apache.hadoop.hive.ql.QueryProperties;
 import org.apache.hadoop.hive.ql.exec.AbstractMapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.FetchTask;
-import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
 import org.apache.hadoop.hive.ql.exec.GroupByOperator;
 import org.apache.hadoop.hive.ql.exec.JoinOperator;
 import org.apache.hadoop.hive.ql.exec.ListSinkOperator;
@@ -43,7 +42,6 @@
 import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
@@ -79,7 +77,6 @@
   private Map<MapJoinOperator, QBJoinTree> mapJoinContext;
   private Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext;
   private HashMap<TableScanOperator, Table> topToTable;
-  private Map<FileSinkOperator, Table> fsopToTable;
   private List<ReduceSinkOperator> reduceSinkOperatorsAddedByEnforceBucketingSorting;
   private HashMap<TableScanOperator, Map<String, String>> topToProps;
   private HashMap<String, SplitSample> nameToSplitSample;
@@ -169,7 +166,6 @@ public ParseContext(
       Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext,
       HashMap<TableScanOperator, Table> topToTable,
       HashMap<TableScanOperator, Map<String, String>> topToProps,
-      Map<FileSinkOperator, Table> fsopToTable,
       List<LoadTableDesc> loadTableWork, List<LoadFileDesc> loadFileWork,
       Context ctx, HashMap<String, String> idToTableNameMap, int destTableId,
       UnionProcContext uCtx, List<AbstractMapJoinOperator<? extends MapJoinDesc>> listMapJoinOpsNoReducer,
@@ -191,7 +187,6 @@ public ParseContext(
     this.joinContext = joinContext;
     this.smbMapJoinContext = smbMapJoinContext;
     this.topToTable = topToTable;
-    this.fsopToTable = fsopToTable;
     this.topToProps = topToProps;
     this.loadFileWork = loadFileWork;
     this.loadTableWork = loadTableWork;
@@ -312,14 +307,6 @@ public void setTopToTable(HashMap<TableScanOperator, Table> topToTable) {
     this.topToTable = topToTable;
   }
 
-  public Map<FileSinkOperator, Table> getFsopToTable() {
-    return fsopToTable;
-  }
-
-  public void setFsopToTable(Map<FileSinkOperator, Table> fsopToTable) {
-    this.fsopToTable = fsopToTable;
-  }
-
   public List<ReduceSinkOperator>
       getReduceSinkOperatorsAddedByEnforceBucketingSorting() {
     return reduceSinkOperatorsAddedByEnforceBucketingSorting;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index cea86df..7e7b22b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -395,8 +395,7 @@ public void initParseCtx(ParseContext pctx) {
   public ParseContext getParseContext() {
     return new ParseContext(conf, qb, ast, opToPartPruner, opToPartList, topOps,
         topSelOps, opParseCtx, joinContext, smbMapJoinContext, topToTable, topToTableProps,
-        fsopToTable, loadTableWork,
-        loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
+        loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
         listMapJoinOpsNoReducer, groupOpToInputTables, prunedPartitions,
         opToSamplePruner, globalLimitCtx, nameToSplitSample, inputs, rootTasks,
         opToPartToSkewedPruner, viewAliasToInput,
@@ -6334,7 +6333,9 @@ private Operator genFileSinkPlan(String dest, QB qb, Operator input)
           + dest_path + " row schema: " + inputRR.toString());
     }
 
-    fsopToTable.put((FileSinkOperator) output, dest_tab);
+    FileSinkOperator fso = (FileSinkOperator) output;
+    fso.getConf().setTable(dest_tab);
+    fsopToTable.put(fso, dest_tab);
     return output;
   }
 
@@ -9966,7 +9967,7 @@ void analyzeInternal(ASTNode ast, PlannerContext plannerCtx) throws SemanticExce
     // 4. Generate Parse Context for Optimizer & Physical compiler
     ParseContext pCtx = new ParseContext(conf, qb, plannerCtx.child, opToPartPruner,
         opToPartList, topOps, topSelOps, opParseCtx, joinContext, smbMapJoinContext, topToTable, topToTableProps,
-        fsopToTable, loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
+        loadTableWork, loadFileWork, ctx, idToTableNameMap, destTableId, uCtx,
         listMapJoinOpsNoReducer, groupOpToInputTables, prunedPartitions, opToSamplePruner,
         globalLimitCtx, nameToSplitSample, inputs, rootTasks, opToPartToSkewedPruner,
         viewAliasToInput, reduceSinkOperatorsAddedByEnforceBucketingSorting, queryProperties);
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 23fbbe1..0dfb541 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -26,8 +26,6 @@
 import java.util.List;
 import java.util.Set;
 
-import com.google.common.collect.Interner;
-import com.google.common.collect.Interners;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
@@ -60,6 +58,9 @@
 import org.apache.hadoop.hive.ql.plan.TableDesc;
 import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
 
+import com.google.common.collect.Interner;
+import com.google.common.collect.Interners;
+
 /**
  * TaskCompiler is a the base class for classes that compile
  * operator pipelines into tasks.
@@ -388,7 +389,6 @@ public ParseContext getParseContext(ParseContext pCtx, List
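
Net effect of the patch: the FileSinkOperator-to-Table mapping no longer has to be threaded through ParseContext (the fsopToTable field, constructor parameter, and accessors are all removed); instead, SemanticAnalyzer.genFileSinkPlan() records the destination table on the sink's own descriptor via setTable(dest_tab), and the two optimizers read it back with fsOp.getConf().getTable(). Below is a minimal sketch of the new lookup pattern, grounded in the calls shown in the diff; the helper class and method names are illustrative only, not part of the patch:

    import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
    import org.apache.hadoop.hive.ql.metadata.Table;

    // Hypothetical helper, for illustration only.
    public final class DestTableLookup {
      private DestTableLookup() {
      }

      // Old pattern (removed by this patch): the optimizer needed a
      // ParseContext and consulted its map:
      //   Table destTable = parseCtx.getFsopToTable().get(fsOp);
      //
      // New pattern: the table travels with the operator's descriptor,
      // so any code holding the FileSinkOperator can resolve it directly.
      public static Table destinationTableOf(FileSinkOperator fsOp) {
        // Populated by SemanticAnalyzer.genFileSinkPlan() via
        // fso.getConf().setTable(dest_tab); null when the sink does not
        // write to a table, in which case both optimizers above bail out.
        return fsOp.getConf().getTable();
      }
    }

Beyond trimming the ParseContext API, this appears to keep the destination table reachable from any compile stage that holds the operator tree but not the ParseContext the plan was built with, such as the ParseContext reconstructed in TaskCompiler.getParseContext().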