diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index c696fd5..7725a55 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -76,6 +76,7 @@
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMRUnionCtx;
 import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx;
+import org.apache.hadoop.hive.ql.optimizer.bucketpruner.BucketPruner;
 import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPruner;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.TableSpec;
@@ -592,6 +593,12 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set<ReadEntity> inputs,
+      Map<String, ExprNodeDesc> partToBucketPruner =
+          parseCtx.getOpToBucketPruner().get(topOp);
+      ExprNodeDesc bucketPruner = (partToBucketPruner != null)
+          ? partToBucketPruner.get(part.getName()) : null;
+
       if (sampleDescr != null) {
         assert (listBucketingPruner == null) : "Sampling and list bucketing can't coexist.";
         paths = SamplePruner.prune(part, sampleDescr);
@@ -600,6 +607,9 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set<ReadEntity> inputs,
+      } else if (bucketPruner != null) {
+        paths = BucketPruner.prune(parseCtx, part, bucketPruner);
+      }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPruner.java
new file mode 100644
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPruner.java
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.bucketpruner;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.optimizer.PrunerUtils;
+import org.apache.hadoop.hive.ql.optimizer.Transform;
+import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.LBOpPartitionWalkerCtx;
+import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.LBPartitionProcFactory;
+import org.apache.hadoop.hive.ql.parse.ParseContext;
+import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * The transform that walks the operator tree and, for every bucketed
+ * partition, builds a pruning expression over the bucket columns;
+ * prune() later evaluates that expression to pick the bucket files
+ * worth scanning.
+ */
+public class BucketPruner implements Transform {
+
+  public static final Log LOG = LogFactory.getLog(BucketPruner.class);
+
+  /*
+   * (non-Javadoc)
+   *
+   * @see org.apache.hadoop.hive.ql.optimizer.Transform#transform(
+   * org.apache.hadoop.hive.ql.parse.ParseContext)
+   */
+  @Override
+  public ParseContext transform(ParseContext pctx) throws SemanticException {
+    // Create the context for walking operators.
+    NodeProcessorCtx opPartWalkerCtx = new LBOpPartitionWalkerCtx(pctx);
+
+    // Retrieve all partitions generated by the partition pruner
+    // and the partition column pruner.
+    PrunerUtils.walkOperatorTree(pctx, opPartWalkerCtx,
+        LBPartitionProcFactory.getFilterProc(),
+        LBPartitionProcFactory.getDefaultProc());
+
+    PrunedPartitionList partsList =
+        ((LBOpPartitionWalkerCtx) opPartWalkerCtx).getPartitions();
+    if (partsList == null) {
+      return pctx;
+    }
+
+    Set<Partition> parts = partsList.getPartitions();
+    if (parts == null || parts.isEmpty()) {
+      return pctx;
+    }
+
+    for (Partition part : parts) {
+      if (BucketPrunerUtils.isBucketPart(part)) {
+        // Create a context for walking operators.
+        NodeProcessorCtx opWalkerCtx =
+            new BucketPrunerOpWalkerCtx(pctx.getOpToBucketPruner(), part);
+
+        // Walk the operator tree to create the expression tree for buckets.
+        PrunerUtils.walkOperatorTree(pctx, opWalkerCtx,
+            BucketPrunerOpProcFactory.getFilterProc(),
+            BucketPrunerOpProcFactory.getDefaultProc());
+      }
+    }
+
+    return pctx;
+  }
+
+  /**
+   * Get the list of bucket paths for the partition that satisfy the bucket
+   * pruner condition.
+   */
+  public static Path[] prune(ParseContext ctx, Partition part,
+      ExprNodeDesc pruner) {
+    Path[] finalPaths;
+
+    LOG.trace("started pruning bucket");
+    try {
+      finalPaths = execute(ctx, part, pruner);
+    } catch (SemanticException e) {
+      LOG.warn("Using full bucket scan: "
+          + Arrays.toString(part.getPath()) + ".", e);
+      finalPaths = part.getPath();
+    }
+
+    return finalPaths;
+  }
+
+  private static Path[] execute(ParseContext ctx, Partition part,
+      ExprNodeDesc pruner) throws SemanticException {
+    Path[] finalPaths;
+    List<Path> selectedPaths = new ArrayList<>();
+
+    if (BucketPrunerUtils.isUnknownState(pruner)) {
+      // Use full buckets for the error case.
+      LOG.warn("Bucket pruner is either null or in an unknown state, so a"
+          + " full bucket scan is used: " + Arrays.toString(part.getPath()));
+      finalPaths = part.getPath();
+    } else {
+      List<String> bucketNames = new ArrayList<>();
+      int bucketNum = part.getBucketCount();
+      for (int i = 0; i < bucketNum; i++) {
+        bucketNames.add(part.getBucketPath(i).getName());
+      }
+
+      decideBucketFileSelection(part, pruner, selectedPaths, bucketNames);
+
+      finalPaths = generateFinalPath(part, selectedPaths);
+    }
+
+    return finalPaths;
+  }
+
+  private static void decideBucketFileSelection(Partition part,
+      ExprNodeDesc pruner, List<Path> selectedPaths, List<String> bucketNames) {
+    try {
+      for (int i = 0; i < bucketNames.size(); i++) {
+        Boolean matchResult =
+            BucketPrunerUtils.evaluateExprOnCell(bucketNames.get(i), pruner);
+        // null means "unknown"; the bucket must be kept in that case.
+        if (matchResult == null || matchResult) {
+          // Keep the full bucket path; a Path built from the bare file name
+          // would be relative and unusable for input path selection.
+          selectedPaths.add(part.getBucketPath(i));
+        }
+      }
+    } catch (HiveException e) {
+      // A failed evaluation must not drop buckets; clear the selection so
+      // that generateFinalPath() falls back to a full scan.
+      LOG.error(e);
+      selectedPaths.clear();
+    }
+  }
+
+  /**
+   * Decide the final paths: the selected buckets, or all buckets if none
+   * was selected.
+   *
+   * @param part the partition being pruned
+   * @param selectedPaths the bucket paths that matched the pruner
+   * @return the paths to scan
+   */
+  private static Path[] generateFinalPath(Partition part, List<Path> selectedPaths) {
+    Path[] finalPaths;
+    if (selectedPaths.isEmpty()) {
+      LOG.warn("Using full bucket scan: " + Arrays.toString(part.getPath()) + ".");
+      finalPaths = part.getPath();
+    } else {
+      finalPaths = selectedPaths.toArray(new Path[0]);
+    }
+    return finalPaths;
+  }
+}
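Reviewer note: the body of the second GenMapRedUtils hunk and the index line of the BucketPruner.java header were garbled in this excerpt; the "} else if (bucketPruner != null)" branch shown above is reconstructed from BucketPruner's public API rather than copied verbatim. The contract of prune() is deliberately conservative: it never throws and never returns null, and any unknown or failed evaluation degrades to part.getPath(), i.e. all bucket files, so pruning can only cut I/O, never rows. A minimal caller-side sketch under that reading (names illustrative, not part of the patch):

    // Path selection in a caller such as setMapWork(): the result can be
    // used directly, since a failed pruning already fell back to all buckets.
    Path[] paths = (bucketPruner != null)
        ? BucketPruner.prune(parseCtx, part, bucketPruner)
        : part.getPath();
    for (Path p : paths) {
      // hand each surviving bucket file to the MapWork path/alias bookkeeping
    }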

diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerExprProcCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerExprProcCtx.java
new file mode 100644
index 0000000..4622b53
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerExprProcCtx.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.bucketpruner;
+
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+
+/**
+ * Expression-walk context for the bucket pruner: the table alias being
+ * pruned and the partition whose buckets are examined.
+ */
+public class BucketPrunerExprProcCtx implements NodeProcessorCtx {
+
+  private String tabAlias;
+  private final Partition part;
+
+  public BucketPrunerExprProcCtx(String tabAlias, Partition part) {
+    this.tabAlias = tabAlias;
+    this.part = part;
+  }
+
+  public String getTabAlias() {
+    return tabAlias;
+  }
+
+  public void setTabAlias(String tabAlias) {
+    this.tabAlias = tabAlias;
+  }
+
+  public Partition getPart() {
+    return part;
+  }
+}

diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerExprProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerExprProcFactory.java
new file mode 100644
index 0000000..80d2fd2
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerExprProcFactory.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.bucketpruner;
+
+import org.apache.hadoop.hive.ql.lib.Node;
+import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.optimizer.PrunerExpressionOperatorFactory;
+import org.apache.hadoop.hive.ql.optimizer.PrunerUtils;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Expression processor factory for bucket pruning: keeps references to the
+ * partition's bucket columns and folds every other leaf into a null
+ * constant, so the surviving expression only talks about buckets.
+ */
+public class BucketPrunerExprProcFactory
+    extends PrunerExpressionOperatorFactory {
+
+  public static class BucketPrunerExprProcessor extends ColumnExprProcessor {
+
+    @Override
+    protected ExprNodeDesc processColumnDesc(NodeProcessorCtx procCtx,
+        ExprNodeColumnDesc cd) {
+      ExprNodeDesc newcd;
+      BucketPrunerExprProcCtx ctx = (BucketPrunerExprProcCtx) procCtx;
+      Partition part = ctx.getPart();
+
+      if (cd.getTabAlias().equalsIgnoreCase(ctx.getTabAlias())
+          && isPruneForBucket(part, cd.getColumn())) {
+        newcd = cd.clone();
+      } else {
+        // Not a bucket column: degrade to "unknown" (a null constant).
+        newcd = new ExprNodeConstantDesc(cd.getTypeInfo(), null);
+      }
+      return newcd;
+    }
+
+    private boolean isPruneForBucket(Partition part, String columnName) {
+      List<String> bucketCols = part.getBucketCols();
+      int bucketCount = part.getBucketCount();
+      return bucketCols != null && bucketCols.contains(columnName)
+          && bucketCount > 0;
+    }
+  }
+
+  private BucketPrunerExprProcFactory() {}
+
+  /**
+   * Generates the bucket pruner expression for one partition from the
+   * filter predicate.
+   */
+  public static ExprNodeDesc genPruner(String tabAlias, ExprNodeDesc pred,
+      Partition part) throws SemanticException {
+    NodeProcessorCtx bucketPrunerCtx = new BucketPrunerExprProcCtx(tabAlias,
+        part);
+
+    Map<Node, Object> outputMap = PrunerUtils.walkExprTree(pred,
+        bucketPrunerCtx, getColumnProcessor(), getFieldProcessor(),
+        getGenericFuncProcessor(), getDefaultExprProcessor());
+
+    return (ExprNodeDesc) outputMap.get(pred);
+  }
+
+  public static NodeProcessor getColumnProcessor() {
+    return new BucketPrunerExprProcessor();
+  }
+}
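The rewrite above keeps a column reference only if it is a bucket column of the partition, and replaces every other leaf with a null constant, i.e. "unknown" under the three-valued evaluation in BucketPrunerUtils. For a table bucketed on user_id, a filter such as user_id = 42 AND ds = '2014-01-01' is thus reduced to an expression over user_id only, with the ds comparison degrading to unknown, which keeps buckets rather than dropping them. A sketch of how the pruner is requested (illustrative; part is assumed to be a bucketed Partition and "t" its alias):

    // predicate taken from the FilterOperator above the table scan
    ExprNodeDesc predicate = filterOp.getConf().getPredicate();
    ExprNodeDesc pruner =
        BucketPrunerExprProcFactory.genPruner("t", predicate, part);
    // pruner now references only bucket columns and null constants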

diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerOpProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerOpProcFactory.java
new file mode 100644
index 0000000..df30953
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerOpProcFactory.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.bucketpruner;
+
+import org.apache.hadoop.hive.ql.exec.FilterOperator;
+import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.lib.NodeProcessor;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.optimizer.PrunerOperatorFactory;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+
+/**
+ * Operator factory for the bucket pruner walk: derives the bucket pruning
+ * predicate from each filter sitting on top of a table scan.
+ */
+public class BucketPrunerOpProcFactory extends PrunerOperatorFactory {
+
+  public static class BucketPrunerFilterPruner extends FilterPruner {
+
+    @Override
+    protected void generatePredicate(NodeProcessorCtx procCtx,
+        FilterOperator fop, TableScanOperator top) throws SemanticException {
+      BucketPrunerOpWalkerCtx owc = (BucketPrunerOpWalkerCtx) procCtx;
+      ExprNodeDesc predicate = fop.getConf().getPredicate();
+      String alias = top.getConf().getAlias();
+
+      // Generate the bucket pruning predicate.
+      ExprNodeDesc bucketPrunerPred = BucketPrunerExprProcFactory.genPruner(
+          alias, predicate, owc.getPart());
+
+      addPruningPred(owc.getOpToPartToBucketPruner(), top, bucketPrunerPred,
+          owc.getPart());
+    }
+
+  }
+
+  private BucketPrunerOpProcFactory() {}
+
+  public static NodeProcessor getFilterProc() {
+    return new BucketPrunerFilterPruner();
+  }
+}
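Note that addPruningPred is inherited from PrunerOperatorFactory, as is the getDefaultProc() that BucketPruner.transform calls on this factory; only the filter processor needs overriding here. The call files the generated predicate under the scan operator and the partition name, which is exactly the shape GenMapRedUtils reads back in the first hunk of this patch. Roughly (illustrative):

    // owc.getOpToPartToBucketPruner():
    //   TableScanOperator -> { partition name -> pruning ExprNodeDesc }
    Map<String, ExprNodeDesc> byPart =
        owc.getOpToPartToBucketPruner().get(top);
    ExprNodeDesc prunerForPart = byPart.get(owc.getPart().getName());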

diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerOpWalkerCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerOpWalkerCtx.java
new file mode 100644
index 0000000..0afbcf3
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerOpWalkerCtx.java
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.bucketpruner;
+
+import org.apache.hadoop.hive.ql.exec.TableScanOperator;
+import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+
+import java.util.Map;
+
+/**
+ * Walker context that carries the partition being processed and the shared
+ * operator-to-(partition-to-pruner) map that the walk fills in.
+ */
+public class BucketPrunerOpWalkerCtx implements NodeProcessorCtx {
+
+  private final Map<TableScanOperator, Map<String, ExprNodeDesc>>
+      opToPartToBucketPruner;
+  private final Partition part;
+
+  public BucketPrunerOpWalkerCtx(
+      Map<TableScanOperator, Map<String, ExprNodeDesc>> opToPartToBucketPruner,
+      Partition part) {
+    this.opToPartToBucketPruner = opToPartToBucketPruner;
+    this.part = part;
+  }
+
+  public Map<TableScanOperator, Map<String, ExprNodeDesc>>
+      getOpToPartToBucketPruner() {
+    return opToPartToBucketPruner;
+  }
+
+  public Partition getPart() {
+    return part;
+  }
+}

diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerUtils.java
new file mode 100644
index 0000000..6e242ae
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/bucketpruner/BucketPrunerUtils.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.optimizer.bucketpruner;
+
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator;
+import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluatorFactory;
+import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.optimizer.listbucketingpruner.ListBucketingPrunerUtils;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual;
+
+import java.util.List;
+
+/**
+ * Utilities that evaluate a bucket pruning expression against a single
+ * bucket file name, reusing the three-valued (true/false/unknown) logic
+ * helpers of the list bucketing pruner.
+ */
+public class BucketPrunerUtils {
+
+  /**
+   * Check whether the partition is bucketed.
+   *
+   * @param part the partition to examine
+   * @return true if it has bucket columns and a positive bucket count
+   */
+  public static boolean isBucketPart(Partition part) {
+    return part.getBucketCols() != null && part.getBucketCount() > 0;
+  }
+
+  static Boolean evaluateExprOnCell(String bucket, ExprNodeDesc pruner)
+      throws HiveException {
+    return recursiveExpr(pruner, bucket);
+  }
+
+  /**
+   * Walk through the expression tree recursively to evaluate it against one
+   * bucket file name.
+   *
+   * @param node the pruning expression
+   * @param bucket the bucket file name
+   * @return Boolean.TRUE, Boolean.FALSE, or null for "unknown"
+   * @throws HiveException
+   */
+  private static Boolean recursiveExpr(final ExprNodeDesc node,
+      final String bucket) throws HiveException {
+    if (isUnknownState(node)) {
+      return null;
+    }
+
+    if (node instanceof ExprNodeGenericFuncDesc) {
+      if (((ExprNodeGenericFuncDesc) node).getGenericUDF() instanceof GenericUDFOPEqual) {
+        return evaluateEqualNode(node, bucket);
+      } else if (FunctionRegistry.isOpAnd(node)) {
+        return evaluateAndNode(node, bucket);
+      } else if (FunctionRegistry.isOpOr(node)) {
+        return evaluateOrNode(node, bucket);
+      } else if (FunctionRegistry.isOpNot(node)) {
+        return evaluateNotNode(node, bucket);
+      } else {
+        return null;
+      }
+    } else {
+      return null;
+    }
+  }
+
+  private static Boolean evaluateEqualNode(final ExprNodeDesc node,
+      final String bucket) throws HiveException {
+    List<ExprNodeDesc> children = node.getChildren();
+
+    assert (children != null && children.size() == 2) : "GenericUDFOPEqual should have 2 "
+        + "ExprNodeDesc children. Node name : " + node.getName();
+    ExprNodeDesc left = children.get(0);
+    ExprNodeDesc right = children.get(1);
+
+    assert (left instanceof ExprNodeColumnDesc && right instanceof ExprNodeConstantDesc) :
+        "GenericUDFOPEqual should have an ExprNodeColumnDesc as its first child "
+        + "and an ExprNodeConstantDesc as its second, but here the first is "
+        + left.getName() + " and the second is " + right.getName();
+
+    return startComparisonInEqualNode(bucket, left, right);
+  }
+
+  private static Boolean startComparisonInEqualNode(final String bucket,
+      ExprNodeDesc left, ExprNodeDesc right) throws HiveException {
+    Object constantValueInFilter = ((ExprNodeConstantDesc) right).getValue();
+
+    return coreComparisonInEqualNode(bucket, left, constantValueInFilter);
+  }
+
+  private static Boolean coreComparisonInEqualNode(String bucket, ExprNodeDesc desc,
+      Object constantValueInFilter) throws HiveException {
+    ExprNodeEvaluator evaluator = ExprNodeEvaluatorFactory.get(desc);
+    // TODO: the evaluator is never initialized with a row object inspector
+    // (evaluator.initialize(rowInspector)); the constant from the filter is
+    // fed in directly as the "row" here.
+    Object hashValue = evaluator.evaluate(constantValueInFilter);
+
+    if (hashValue == null) {
+      return null; // unknown: keep the bucket
+    }
+    return hashValue.toString().equals(bucket);
+  }
+
+  private static Boolean evaluateNotNode(final ExprNodeDesc node,
+      final String bucket) throws HiveException {
+    List<ExprNodeDesc> children = node.getChildren();
+    if ((children == null) || (children.size() != 1)) {
+      throw new SemanticException("GenericUDFOPNot should have 1 ExprNodeDesc. Node name : "
+          + node.getName());
+    }
+    ExprNodeDesc child = children.get(0);
+    return ListBucketingPrunerUtils.notBoolOperand(recursiveExpr(child, bucket));
+  }
+
+  private static Boolean evaluateOrNode(final ExprNodeDesc node,
+      final String bucket) throws HiveException {
+    List<ExprNodeDesc> children = node.getChildren();
+    if ((children == null) || (children.size() != 2)) {
+      throw new SemanticException("GenericUDFOPOr should have 2 ExprNodeDesc. Node name : "
+          + node.getName());
+    }
+    ExprNodeDesc left = children.get(0);
+    ExprNodeDesc right = children.get(1);
+    return ListBucketingPrunerUtils.orBoolOperand(recursiveExpr(left, bucket),
+        recursiveExpr(right, bucket));
+  }
+
+  private static Boolean evaluateAndNode(final ExprNodeDesc node,
+      final String bucket) throws HiveException {
+    List<ExprNodeDesc> children = node.getChildren();
+    if ((children == null) || (children.size() != 2)) {
+      throw new SemanticException("GenericUDFOPAnd should have 2 ExprNodeDesc. Node name : "
+          + node.getName());
+    }
+    ExprNodeDesc left = children.get(0);
+    ExprNodeDesc right = children.get(1);
+    return ListBucketingPrunerUtils.andBoolOperand(recursiveExpr(left, bucket),
+        recursiveExpr(right, bucket));
+  }
+
+  static boolean isUnknownState(ExprNodeDesc descNd) {
+    return descNd == null
+        || (descNd instanceof ExprNodeConstantDesc
+            && ((ExprNodeConstantDesc) descNd).getValue() == null);
+  }
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
index 5872e8e..c4ebb31 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
 import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext;
 import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.AnalyzeRewriteContext;
+import org.apache.hadoop.hive.ql.plan.BucketPrunerDesc;
 import org.apache.hadoop.hive.ql.plan.CreateTableDesc;
 import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
 import org.apache.hadoop.hive.ql.plan.FilterDesc.SampleDesc;
@@ -86,6 +87,7 @@
   // reducer
   private Map<String, PrunedPartitionList> prunedPartitions;
   private Map<String, ReadEntity> viewAliasToInput;
+  private Map<TableScanOperator, Map<String, ExprNodeDesc>> opToBucketPruner;
 
   /**
    * The lineage information.
@@ -394,6 +396,18 @@ public void setPrunedPartitions(
   }
 
   /**
+   * @return the opToBucketPruner
+   */
+  public Map<TableScanOperator, Map<String, ExprNodeDesc>> getOpToBucketPruner() {
+    return opToBucketPruner;
+  }
+
+  public void setOpToBucketPruner(
+      Map<TableScanOperator, Map<String, ExprNodeDesc>> opToBucketPruner) {
+    this.opToBucketPruner = opToBucketPruner;
+  }
+
+  /**
    * Sets the lineage information.
    *
    * @param lInfo The lineage information.
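Reviewer note: as excerpted, nothing in this patch initializes opToBucketPruner or registers BucketPruner with the optimizer, and the BucketPrunerDesc import added above is not referenced by any visible hunk; presumably the complete patch covers these elsewhere. A sketch of the expected wiring, under that assumption (placement hypothetical):

    // alongside the other pruning transforms, e.g. in Optimizer.initialize():
    transformations.add(new BucketPruner());

    // and wherever ParseContext is populated, before the transforms run:
    pctx.setOpToBucketPruner(
        new HashMap<TableScanOperator, Map<String, ExprNodeDesc>>());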