diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
index 70c23a6..3975b6b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractBucketJoinProc.java
@@ -131,14 +131,15 @@ private boolean checkNumberOfBucketsAgainstBigTable(
 
   protected boolean canConvertMapJoinToBucketMapJoin(
       MapJoinOperator mapJoinOp,
-      ParseContext pGraphContext,
       BucketJoinProcCtx context) throws SemanticException {
-
-    QBJoinTree joinCtx = pGraphContext.getMapJoinContext().get(mapJoinOp);
+
+    QBJoinTree joinCtx = null;
+    if (this.pGraphContext.getMapJoinContext().contains(mapJoinOp)) {
+      joinCtx = mapJoinOp.getConf().getJoinTree();
+    }
     if (joinCtx == null) {
       return false;
     }
-
     List<String> joinAliases = new ArrayList<String>();
     String[] srcs = joinCtx.getBaseSrc();
     String[] left = joinCtx.getLeftAliases();
@@ -172,7 +173,6 @@ protected boolean canConvertMapJoinToBucketMapJoin(
     Map<Byte, List<ExprNodeDesc>> keysMap = mapJoinOp.getConf().getKeys();
 
     return checkConvertBucketMapJoin(
-        pGraphContext,
         context,
         joinCtx,
         keysMap,
@@ -189,7 +189,6 @@ protected boolean canConvertMapJoinToBucketMapJoin(
    * d. The number of buckets in the big table can be divided by no of buckets in small tables.
    */
   protected boolean checkConvertBucketMapJoin(
-      ParseContext pGraphContext,
       BucketJoinProcCtx context,
       QBJoinTree joinCtx,
       Map<Byte, List<ExprNodeDesc>> keysMap,
@@ -438,7 +437,7 @@ protected void convertMapJoinToBucketMapJoin(
   }
   // convert partition to partition spec string
-  private static Map<String, List<String>> convert(Map<Partition, List<String>> mapping) {
+  private Map<String, List<String>> convert(Map<Partition, List<String>> mapping) {
     Map<String, List<String>> converted = new HashMap<String, List<String>>();
     for (Map.Entry<Partition, List<String>> entry : mapping.entrySet()) {
       converted.put(entry.getKey().getName(), entry.getValue());
     }
@@ -467,7 +466,7 @@ protected void convertMapJoinToBucketMapJoin(
   }
 
   // called for each partition of big table and populates mapping for each file in the partition
-  private static void fillMappingBigTableBucketFileNameToSmallTableBucketFileNames(
+  private void fillMappingBigTableBucketFileNameToSmallTableBucketFileNames(
       List<Integer> smallTblBucketNums,
       List<List<String>> smallTblFilesList,
      Map<String, List<String>> bigTableBucketFileNameToSmallTableBucketFileNames,
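Every call site that previously did a map lookup (pGraphContext.getMapJoinContext().get(mapJoinOp)) now checks membership in the set and reads the QBJoinTree from the operator's own descriptor, as in the hunks above. A hypothetical helper that captures the repeated pattern — the method name is illustrative and not part of the patch, which inlines the pattern at each call site:

    // Hypothetical convenience method (illustration only, not introduced by this patch).
    private static QBJoinTree lookupJoinTree(ParseContext pctx, MapJoinOperator op) {
      // Membership is still tracked centrally on ParseContext, but the tree itself
      // now lives on the operator's JoinDesc.
      if (pctx.getMapJoinContext() != null && pctx.getMapJoinContext().contains(op)) {
        return op.getConf().getJoinTree();
      }
      return null;
    }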
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
index c9e8086..b780cd8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/AbstractSMBJoinProc.java
@@ -98,8 +98,10 @@ protected boolean canConvertBucketMapJoinToSMBJoin(MapJoinOperator mapJoinOp,
     }
 
     boolean tableEligibleForBucketedSortMergeJoin = true;
-    QBJoinTree joinCxt = this.pGraphContext.getMapJoinContext()
-        .get(mapJoinOp);
+    QBJoinTree joinCxt = null;
+    if (this.pGraphContext.getMapJoinContext().contains(mapJoinOp)) {
+      joinCxt = mapJoinOp.getConf().getJoinTree();
+    }
     if (joinCxt == null) {
       return false;
     }
@@ -117,7 +119,6 @@ protected boolean canConvertBucketMapJoinToSMBJoin(MapJoinOperator mapJoinOp,
     for (int pos = 0; pos < srcs.length; pos++) {
       tableEligibleForBucketedSortMergeJoin = tableEligibleForBucketedSortMergeJoin
         && isEligibleForBucketSortMergeJoin(smbJoinContext,
-             pGraphContext,
              mapJoinOp.getConf().getKeys().get((byte) pos),
              joinCxt,
              srcs,
@@ -141,8 +142,7 @@ protected boolean canConvertBucketMapJoinToSMBJoin(MapJoinOperator mapJoinOp,
 
   // Convert the bucket map-join operator to a sort-merge map join operator
   protected SMBMapJoinOperator convertBucketMapJoinToSMBJoin(MapJoinOperator mapJoinOp,
-    SortBucketJoinProcCtx smbJoinContext,
-    ParseContext parseContext) {
+    SortBucketJoinProcCtx smbJoinContext) {
 
     String[] srcs = smbJoinContext.getSrcs();
     SMBMapJoinOperator smbJop = new SMBMapJoinOperator(mapJoinOp);
@@ -219,10 +219,13 @@ protected SMBMapJoinOperator convertBucketMapJoinToSMBJoin(MapJoinOperator mapJo
       child.getParentOperators().remove(index);
       child.getParentOperators().add(index, smbJop);
     }
-    parseContext.getSmbMapJoinContext().put(smbJop,
-        parseContext.getMapJoinContext().get(mapJoinOp));
-    parseContext.getMapJoinContext().remove(mapJoinOp);
-    parseContext.getOpParseCtx().put(smbJop, parseContext.getOpParseCtx().get(mapJoinOp));
+    QBJoinTree joinCxt = null;
+    if (this.pGraphContext.getMapJoinContext().contains(mapJoinOp)) {
+      joinCxt = mapJoinOp.getConf().getJoinTree();
+    }
+    pGraphContext.getSmbMapJoinContext().put(smbJop, joinCxt);
+    pGraphContext.getMapJoinContext().remove(mapJoinOp);
+    pGraphContext.getOpParseCtx().put(smbJop, pGraphContext.getOpParseCtx().get(mapJoinOp));
 
     return smbJop;
   }
@@ -242,7 +245,6 @@ protected SMBMapJoinOperator convertBucketMapJoinToSMBJoin(MapJoinOperator mapJo
    */
   private boolean isEligibleForBucketSortMergeJoin(
       SortBucketJoinProcCtx smbJoinContext,
-      ParseContext pctx,
       List keys,
       QBJoinTree joinTree,
       String[] aliases,
@@ -386,8 +388,7 @@ private boolean checkSortColsAndJoinCols(List sortCols,
 
   // It is already verified that the join can be converted to a bucket map join
   protected boolean checkConvertJoinToSMBJoin(
       JoinOperator joinOperator,
-      SortBucketJoinProcCtx smbJoinContext,
-      ParseContext pGraphContext) throws SemanticException {
+      SortBucketJoinProcCtx smbJoinContext) throws SemanticException {
 
     QBJoinTree joinCtx = pGraphContext.getJoinContext().get(joinOperator);
@@ -404,7 +405,6 @@ protected boolean checkConvertJoinToSMBJoin(
     for (int pos = 0; pos < srcs.length; pos++) {
       if (!isEligibleForBucketSortMergeJoin(smbJoinContext,
-          pGraphContext,
           smbJoinContext.getKeyExprMap().get((byte) pos),
           joinCtx,
           srcs,
@@ -421,12 +421,10 @@ protected boolean checkConvertJoinToSMBJoin(
 
   // Can the join operator be converted to a sort-merge join operator ?
   protected boolean canConvertJoinToSMBJoin(
       JoinOperator joinOperator,
-      SortBucketJoinProcCtx smbJoinContext,
-      ParseContext pGraphContext) throws SemanticException {
+      SortBucketJoinProcCtx smbJoinContext) throws SemanticException {
     boolean canConvert = canConvertJoinToBucketMapJoin(
         joinOperator,
-        pGraphContext,
         smbJoinContext
     );
@@ -434,13 +432,12 @@ protected boolean canConvertJoinToSMBJoin(
       return false;
     }
 
-    return checkConvertJoinToSMBJoin(joinOperator, smbJoinContext, pGraphContext);
+    return checkConvertJoinToSMBJoin(joinOperator, smbJoinContext);
   }
 
   // Can the join operator be converted to a bucket map-merge join operator ?
   protected boolean canConvertJoinToBucketMapJoin(
       JoinOperator joinOp,
-      ParseContext pGraphContext,
       SortBucketJoinProcCtx context) throws SemanticException {
 
     // This has already been inspected and rejected
@@ -508,7 +505,6 @@ protected boolean canConvertJoinToBucketMapJoin(
     // The candidate map-join was derived from the pluggable sort merge join big
     // table matcher.
     return checkConvertBucketMapJoin(
-        pGraphContext,
         context,
         joinCtx,
         keyExprMap,
@@ -519,19 +515,19 @@ protected boolean canConvertJoinToBucketMapJoin(
 
   // Convert the join operator to a bucket map-join join operator
   protected MapJoinOperator convertJoinToBucketMapJoin(
       JoinOperator joinOp,
-      SortBucketJoinProcCtx joinContext,
-      ParseContext parseContext) throws SemanticException {
+      SortBucketJoinProcCtx joinContext) throws SemanticException {
     MapJoinOperator mapJoinOp = MapJoinProcessor.convertMapJoin(
-        parseContext.getConf(),
-        parseContext.getOpParseCtx(),
+        pGraphContext.getConf(),
+        pGraphContext.getOpParseCtx(),
         joinOp,
         pGraphContext.getJoinContext().get(joinOp),
         joinContext.getBigTablePosition(), false, false);
     // Remove the join operator from the query join context
-    parseContext.getMapJoinContext().put(mapJoinOp, parseContext.getJoinContext().get(joinOp));
-    parseContext.getJoinContext().remove(joinOp);
+    mapJoinOp.getConf().setJoinTree(pGraphContext.getJoinContext().get(joinOp));
+    pGraphContext.getMapJoinContext().add(mapJoinOp);
+    pGraphContext.getJoinContext().remove(joinOp);
     convertMapJoinToBucketMapJoin(mapJoinOp, joinContext);
     return mapJoinOp;
   }
 
@@ -539,11 +535,10 @@ protected MapJoinOperator convertJoinToBucketMapJoin(
   // Convert the join operator to a sort-merge join operator
   protected void convertJoinToSMBJoin(
       JoinOperator joinOp,
-      SortBucketJoinProcCtx smbJoinContext,
-      ParseContext parseContext) throws SemanticException {
-    MapJoinOperator mapJoinOp = convertJoinToBucketMapJoin(joinOp, smbJoinContext, parseContext);
+      SortBucketJoinProcCtx smbJoinContext) throws SemanticException {
+    MapJoinOperator mapJoinOp = convertJoinToBucketMapJoin(joinOp, smbJoinContext);
     SMBMapJoinOperator smbMapJoinOp =
-        convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext, parseContext);
+        convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext);
     smbMapJoinOp.setConvertedAutomaticallySMBJoin(true);
   }
 }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java
index 1260c83..7e3c134 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java
@@ -42,7 +42,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
 
     // can the mapjoin present be converted to a bucketed mapjoin
     boolean convert = canConvertMapJoinToBucketMapJoin(
-        mapJoinOperator, pGraphContext, context);
+        mapJoinOperator, context);
     HiveConf conf = context.getConf();
 
     // Throw an error if the user asked for bucketed mapjoin to be enforced and
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index 9a74e1e..de0fd41 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -1062,7 +1062,9 @@ private static void splitTasks(ReduceSinkOperator op,
     if (reducerOp instanceof JoinOperator) {
       joinTree = parseCtx.getJoinContext().get(reducerOp);
     } else if (reducerOp instanceof MapJoinOperator) {
-      joinTree = parseCtx.getMapJoinContext().get(reducerOp);
+      if (parseCtx.getMapJoinContext().contains(reducerOp)) {
+        joinTree = ((MapJoinOperator)reducerOp).getConf().getJoinTree();
+      }
     } else if (reducerOp instanceof SMBMapJoinOperator) {
       joinTree = parseCtx.getSmbMapJoinContext().get(reducerOp);
     }
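GenMapRedUtils above is a consumer of the new bookkeeping; MapJoinProcessor below is the producer that attaches the tree when a join is converted. The two halves of the hand-off, restated as a sketch (variable names follow the surrounding hunks and are illustrative):

    // Producer side: record the join tree on the descriptor and register membership.
    mapJoinOp.getConf().setJoinTree(qbJoin);
    parseCtx.getMapJoinContext().add(mapJoinOp);

    // Consumer side: guard with contains() before reading the tree back.
    if (parseCtx.getMapJoinContext().contains(reducerOp)) {
      joinTree = ((MapJoinOperator) reducerOp).getConf().getJoinTree();
    }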
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
index ccb3ce5..e14e311 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java
@@ -95,19 +95,14 @@
   // (column type + column name). The column name is not really used anywhere, but it
   // needs to be passed. Use the string defined below for that.
   private static final String MAPJOINKEY_FIELDPREFIX = "mapjoinkey";
-
-  private ParseContext pGraphContext;
-
-  /**
-   * empty constructor.
-   */
+
   public MapJoinProcessor() {
-    pGraphContext = null;
   }
 
   @SuppressWarnings("nls")
-  private Operator
-  putOpInsertMap(Operator op, RowResolver rr) {
+  private static Operator putOpInsertMap (
+      ParseContext pGraphContext, Operator op,
+      RowResolver rr) {
     OpParseContext ctx = new OpParseContext(rr);
     pGraphContext.getOpParseCtx().put(op, ctx);
     return op;
@@ -624,7 +619,7 @@ private void genSelectPlan(ParseContext pctx, MapJoinOperator input) throws Sema
 
     SelectDesc select = new SelectDesc(exprs, outputs, false);
 
-    SelectOperator sel = (SelectOperator) putOpInsertMap(OperatorFactory.getAndMakeChild(select,
+    SelectOperator sel = (SelectOperator) putOpInsertMap(pctx, OperatorFactory.getAndMakeChild(select,
         new RowSchema(inputRR.getColumnInfos()), input), inputRR);
 
     sel.setColumnExprMap(colExprMap);
@@ -689,19 +684,18 @@ private int mapSideJoin(JoinOperator op, QBJoinTree joinTree) throws SemanticExc
    */
   @Override
   public ParseContext transform(ParseContext pactx) throws SemanticException {
-    pGraphContext = pactx;
     List<MapJoinOperator> listMapJoinOps = new ArrayList<MapJoinOperator>();
 
     // traverse all the joins and convert them if necessary
-    if (pGraphContext.getJoinContext() != null) {
+    if (pactx.getJoinContext() != null) {
       Map<JoinOperator, QBJoinTree> joinMap = new HashMap<JoinOperator, QBJoinTree>();
-      Map<MapJoinOperator, QBJoinTree> mapJoinMap = pGraphContext.getMapJoinContext();
+      Set<MapJoinOperator> mapJoinMap = pactx.getMapJoinContext();
       if (mapJoinMap == null) {
-        mapJoinMap = new HashMap<MapJoinOperator, QBJoinTree>();
-        pGraphContext.setMapJoinContext(mapJoinMap);
+        mapJoinMap = new HashSet<MapJoinOperator>();
+        pactx.setMapJoinContext(mapJoinMap);
       }
 
-      Set<Map.Entry<JoinOperator, QBJoinTree>> joinCtx = pGraphContext.getJoinContext().entrySet();
+      Set<Map.Entry<JoinOperator, QBJoinTree>> joinCtx = pactx.getJoinContext().entrySet();
       Iterator<Map.Entry<JoinOperator, QBJoinTree>> joinCtxIter = joinCtx.iterator();
       while (joinCtxIter.hasNext()) {
         Map.Entry<JoinOperator, QBJoinTree> joinEntry = joinCtxIter.next();
@@ -711,14 +705,15 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
         if (mapJoinPos >= 0) {
           MapJoinOperator mapJoinOp = generateMapJoinOperator(pactx, joinOp, qbJoin, mapJoinPos);
           listMapJoinOps.add(mapJoinOp);
-          mapJoinMap.put(mapJoinOp, qbJoin);
+          mapJoinOp.getConf().setJoinTree(qbJoin);
+          mapJoinMap.add(mapJoinOp);
         } else {
           joinMap.put(joinOp, qbJoin);
         }
       }
 
       // store the new joinContext
-      pGraphContext.setJoinContext(joinMap);
+      pactx.setJoinContext(joinMap);
     }
 
@@ -744,15 +739,15 @@ public ParseContext transform(ParseContext pactx) throws SemanticException {
     // The dispatcher fires the processor corresponding to the closest matching
     // rule and passes the context along
     Dispatcher disp = new DefaultRuleDispatcher(getDefault(), opRules, new MapJoinWalkerCtx(
-        listMapJoinOpsNoRed, pGraphContext));
+        listMapJoinOpsNoRed, pactx));
 
     GraphWalker ogw = new GenMapRedWalker(disp);
     ArrayList<Node> topNodes = new ArrayList<Node>();
     topNodes.addAll(listMapJoinOps);
     ogw.startWalking(topNodes, null);
 
-    pGraphContext.setListMapJoinOpsNoReducer(listMapJoinOpsNoRed);
-    return pGraphContext;
+    pactx.setListMapJoinOpsNoReducer(listMapJoinOpsNoRed);
+    return pactx;
   }
 
   /**
@@ -820,8 +815,14 @@ private Boolean findGrandChildSubqueryMapjoin(MapJoinWalkerCtx ctx, MapJoinOpera
   private boolean nonSubqueryMapJoin(ParseContext pGraphContext, MapJoinOperator mapJoin,
       MapJoinOperator parentMapJoin) {
-    QBJoinTree joinTree = pGraphContext.getMapJoinContext().get(mapJoin);
-    QBJoinTree parentJoinTree = pGraphContext.getMapJoinContext().get(parentMapJoin);
+    QBJoinTree joinTree = null;
+    if (pGraphContext.getMapJoinContext().contains(mapJoin)) {
+      joinTree = mapJoin.getConf().getJoinTree();
+    }
+    QBJoinTree parentJoinTree = null;
+    if (pGraphContext.getMapJoinContext().contains(parentMapJoin)) {
+      parentJoinTree = parentMapJoin.getConf().getJoinTree();
+    }
     if (joinTree.getJoinSrc() != null && joinTree.getJoinSrc().equals(parentJoinTree)) {
       return true;
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
index 5291851..3483c60 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/NonBlockingOpDeDupProc.java
@@ -29,7 +29,7 @@ import java.util.Stack;
 
 import org.apache.hadoop.hive.ql.exec.FilterOperator;
-import org.apache.hadoop.hive.ql.exec.JoinOperator;
+import org.apache.hadoop.hive.ql.exec.MapJoinOperator;
 import org.apache.hadoop.hive.ql.exec.Operator;
 import org.apache.hadoop.hive.ql.exec.SelectOperator;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
@@ -55,15 +55,12 @@
  */
 public class NonBlockingOpDeDupProc implements Transform {
 
-  private ParseContext pctx;
-
   @Override
   public ParseContext transform(ParseContext pctx) throws SemanticException {
-    this.pctx = pctx;
     String SEL = SelectOperator.getOperatorName();
     String FIL = FilterOperator.getOperatorName();
     Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
-    opRules.put(new RuleRegExp("R1", SEL + "%" + SEL + "%"), new SelectDedup());
+    opRules.put(new RuleRegExp("R1", SEL + "%" + SEL + "%"), new SelectDedup(pctx));
     opRules.put(new RuleRegExp("R2", FIL + "%" + FIL + "%"), new FilterDedup());
 
     Dispatcher disp = new DefaultRuleDispatcher(null, opRules, null);
@@ -76,6 +73,13 @@ public ParseContext transform(ParseContext pctx) throws SemanticException {
   }
 
   private class SelectDedup implements NodeProcessor {
+
+    private ParseContext pctx;
+
+    public SelectDedup (ParseContext pctx) {
+      this.pctx = pctx;
+    }
+
     @Override
     public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
@@ -178,28 +182,33 @@ private boolean checkReferences(ExprNodeDesc expr, Set funcOutputs, Set<
       }
       return true;
     }
-  }
-
-  /**
-   * Change existing references in the context to point from child to parent operator.
-   * @param cSEL child operator (to be removed, and merged into parent)
-   * @param pSEL parent operator
-   */
-  private void fixContextReferences(SelectOperator cSEL, SelectOperator pSEL) {
-    Collection<QBJoinTree> qbJoinTrees = new ArrayList<QBJoinTree>();
-    qbJoinTrees.addAll(pctx.getJoinContext().values());
-    qbJoinTrees.addAll(pctx.getMapJoinContext().values());
-    for (QBJoinTree qbJoinTree : qbJoinTrees) {
-      Map<String, Operator<? extends OperatorDesc>> aliasToOpInfo = qbJoinTree.getAliasToOpInfo();
-      for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : aliasToOpInfo.entrySet()) {
-        if (entry.getValue() == cSEL) {
-          aliasToOpInfo.put(entry.getKey(), pSEL);
+
+    /**
+     * Change existing references in the context to point from child to parent operator.
+     * @param cSEL child operator (to be removed, and merged into parent)
+     * @param pSEL parent operator
+     */
+    private void fixContextReferences(SelectOperator cSEL, SelectOperator pSEL) {
+      Collection<QBJoinTree> qbJoinTrees = new ArrayList<QBJoinTree>();
+      qbJoinTrees.addAll(pctx.getJoinContext().values());
+      for (MapJoinOperator mapJoinOp : pctx.getMapJoinContext()) {
+        if (mapJoinOp.getConf().getJoinTree() != null) {
+          qbJoinTrees.add(mapJoinOp.getConf().getJoinTree());
+        }
+      }
+      for (QBJoinTree qbJoinTree : qbJoinTrees) {
+        Map<String, Operator<? extends OperatorDesc>> aliasToOpInfo = qbJoinTree.getAliasToOpInfo();
+        for (Map.Entry<String, Operator<? extends OperatorDesc>> entry : aliasToOpInfo.entrySet()) {
+          if (entry.getValue() == cSEL) {
+            aliasToOpInfo.put(entry.getKey(), pSEL);
+          }
         }
       }
     }
   }
 
   private class FilterDedup implements NodeProcessor {
+
     @Override
     public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
         Object... nodeOutputs) throws SemanticException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java
index 11ce47e..f6ca039 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java
@@ -60,7 +60,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
     }
 
     if (convert) {
-      convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext, pGraphContext);
+      convertBucketMapJoinToSMBJoin(mapJoinOp, smbJoinContext);
     }
     return null;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java
index 8a0c474..d090598 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeJoinProc.java
@@ -44,10 +44,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx,
     SortBucketJoinProcCtx smbJoinContext = (SortBucketJoinProcCtx) procCtx;
 
     boolean convert = canConvertJoinToSMBJoin(
-        joinOp, smbJoinContext, pGraphContext);
+        joinOp, smbJoinContext);
 
     if (convert) {
-      convertJoinToSMBJoin(joinOp, smbJoinContext, pGraphContext);
+      convertJoinToSMBJoin(joinOp, smbJoinContext);
     }
     return null;
   }
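The ParseContext change below is the core of the refactoring: mapJoinContext shrinks from Map<MapJoinOperator, QBJoinTree> to Set<MapJoinOperator>, so replacing one map-join operator with another means moving set membership and carrying the tree over through the descriptor. A sketch of that hand-off, assuming hypothetical oldOp/newOp variables:

    // Illustration only: migrating bookkeeping when newOp supersedes oldOp.
    if (pctx.getMapJoinContext().contains(oldOp)) {
      newOp.getConf().setJoinTree(oldOp.getConf().getJoinTree()); // tree travels on the descriptor
      pctx.getMapJoinContext().remove(oldOp);                     // membership, not a map entry
      pctx.getMapJoinContext().add(newOp);
    }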
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 8215c26..9b48200 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hive.ql.exec.Task;
 import org.apache.hadoop.hive.ql.hooks.LineageInfo;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
@@ -76,7 +75,7 @@
   private HashMap<String, Operator<? extends OperatorDesc>> topSelOps;
   private LinkedHashMap<Operator<? extends OperatorDesc>, OpParseContext> opParseCtx;
   private Map<JoinOperator, QBJoinTree> joinContext;
-  private Map<MapJoinOperator, QBJoinTree> mapJoinContext;
+  private Set<MapJoinOperator> mapJoinContext;
   private Map<SMBMapJoinOperator, QBJoinTree> smbMapJoinContext;
   private HashMap<TableScanOperator, Table> topToTable;
   private Map<FileSinkOperator, Table> fsopToTable;
@@ -570,11 +569,11 @@ public LineageInfo getLineageInfo() {
     return lInfo;
   }
 
-  public Map<MapJoinOperator, QBJoinTree> getMapJoinContext() {
+  public Set<MapJoinOperator> getMapJoinContext() {
     return mapJoinContext;
   }
 
-  public void setMapJoinContext(Map<MapJoinOperator, QBJoinTree> mapJoinContext) {
+  public void setMapJoinContext(Set<MapJoinOperator> mapJoinContext) {
     this.mapJoinContext = mapJoinContext;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
index c144d8c..d51fcd7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java
@@ -27,6 +27,7 @@ import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.parse.QBJoinTree;
 
 /**
  * Join operator Descriptor implementation.
@@ -85,9 +86,12 @@
   // this operator cannot be converted to mapjoin cause output is expected to be sorted on join key
   // it's resulted from RS-dedup optimization, which removes following RS under some condition
   private boolean fixedAsSorted;
-
+
   // used only for explain.
   private transient ExprNodeDesc [][] joinKeys;
+
+  private transient QBJoinTree joinTree;
+
   public JoinDesc() {
   }
@@ -509,4 +513,12 @@ public boolean isFixedAsSorted() {
   public void setFixedAsSorted(boolean fixedAsSorted) {
     this.fixedAsSorted = fixedAsSorted;
   }
+
+  public QBJoinTree getJoinTree() {
+    return joinTree;
+  }
+
+  public void setJoinTree(QBJoinTree joinTree) {
+    this.joinTree = joinTree;
+  }
 }
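The new JoinDesc field is declared transient, so the attached QBJoinTree is a compile-time artifact: the optimizer passes above can read it, but it is not serialized into the plan that is shipped for execution. A short usage sketch under that assumption (variable names illustrative):

    // Attach the parse-time join tree when the map-join is created ...
    mapJoinOp.getConf().setJoinTree(qbJoinTree);
    parseContext.getMapJoinContext().add(mapJoinOp);
    // ... and read it back in a later optimizer pass. After plan serialization the
    // field is null again because it is transient.
    QBJoinTree tree = mapJoinOp.getConf().getJoinTree();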