diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index c82bab8..1c7b14c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.Statistics; +import org.apache.hadoop.hive.ql.plan.Statistics.State; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; @@ -213,6 +214,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, * satisfy condition2 * *

+ * Worst case: If no column statistics are available, then T(R)/2 (half the + * input rows) will be used as the heuristic estimate. + *

* For more information, refer 'Estimating The Cost Of Operations' chapter in * "Database Systems: The Complete Book" by Garcia-Molina et. al. *

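To make the halving heuristic concrete: with the parent stats that appear in annotate_stats_filter.q.out below (8 rows, 796 bytes), halving the rows and recomputing the size from the average row width yields exactly the updated golden output (4 rows, 396 bytes). A minimal standalone sketch of that arithmetic — the class name WorstCaseFilterDemo is ours, not part of the patch:

    public class WorstCaseFilterDemo {
        public static void main(String[] args) {
            long numRows = 8, dataSize = 796;      // parent stats, as in annotate_stats_filter.q.out
            long avgRowSize = dataSize / numRows;  // 796 / 8 = 99 bytes per row
            long wcRows = numRows / 2;             // worst case: half the rows pass the filter
            // prefer avgRowSize * rows when rows remain; otherwise fall back to halving the size,
            // mirroring getWorstCaseStats() added at the end of StatsRulesProcFactory below
            long wcSize = (wcRows > 0) ? avgRowSize * wcRows : dataSize / 2;
            System.out.println(wcRows + " rows, " + wcSize + " bytes"); // prints: 4 rows, 396 bytes
        }
    }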
@@ -239,7 +243,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, fop.setStatistics(st); } else { if (parentStats != null) { - fop.setStatistics(parentStats.clone()); + + // worst case: in the absence of column statistics, assume half the rows are emitted + Statistics wcStats = getWorstCaseStats(parentStats); + fop.setStatistics(wcStats); } } @@ -510,6 +517,9 @@ private long evaluateChildExpr(Statistics stats, ExprNodeDesc child, AnnotateSta * assumed. * *

+ * Worst case: If no column statistics are available, then T(R)/2 (half the + * input rows) will be used as the heuristic estimate. + *

* For more information, refer 'Estimating The Cost Of Operations' chapter in * "Database Systems: The Complete Book" by Garcia-Molina et. al. *

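The aggregation rule applies the same halving only on the reduce side; the hunk below distinguishes the two cases by checking whether the group-by feeds a ReduceSinkOperator. A runnable sketch of both outcomes, with hypothetical input stats (10 rows, 960 bytes) and our own class name; the map-side pass-through reflects one reading of the patch: in the worst case the partial hash aggregation reduces nothing:

    public class WorstCaseGroupByDemo {
        public static void main(String[] args) {
            long parentRows = 10, parentSize = 960;  // hypothetical parent stats
            // map side (child is a ReduceSinkOperator): parent stats pass through unchanged
            System.out.println("map side:    " + parentRows + " rows, " + parentSize + " bytes");
            // reduce side: the final aggregation is assumed to emit half the rows
            long avgRowSize = parentSize / parentRows;  // 96 bytes per row
            long wcRows = parentRows / 2;               // 5 rows
            long wcSize = avgRowSize * wcRows;          // 480 bytes
            System.out.println("reduce side: " + wcRows + " rows, " + wcSize + " bytes");
        }
    }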
@@ -623,7 +633,18 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, gop.setStatistics(stats); } else { if (parentStats != null) { - gop.setStatistics(parentStats.clone()); + + // worst case: in the absence of column statistics, assume half the rows are emitted + if (gop.getChildOperators().get(0) instanceof ReduceSinkOperator) { + + // map side: pass the parent statistics through unchanged + gop.setStatistics(parentStats.clone()); + } else { + + // reduce side: apply the worst-case halving + Statistics wcStats = getWorstCaseStats(parentStats); + gop.setStatistics(wcStats); + } } } } catch (CloneNotSupportedException e) { @@ -668,6 +689,9 @@ private long applyGBYRule(long numRows, long dvProd) { * attributes * *

+ * Worst case: If no column statistics are available, then T(R X S) = T(R)*T(S)/2 + * will be used as the heuristic estimate. + *

* For more information, refer 'Estimating The Cost Of Operations' chapter in * "Database Systems: The Complete Book" by Garcia-Molina et. al. *

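For a multi-way join the patch folds the pairwise T(R X S) = T(R)*T(S)/2 estimate across all parents, summing the average row sizes since a joined row carries the columns of every input. A small worked sketch of that fold; the row counts and sizes below are invented for illustration, and the class name is ours:

    public class WorstCaseJoinDemo {
        public static void main(String[] args) {
            long[] rows = {100, 50, 20}; // hypothetical row counts of the joined relations
            long[] avg  = {40, 30, 10};  // hypothetical average row sizes in bytes
            long numRows = rows[0];
            long avgRowSize = avg[0];
            for (int i = 1; i < rows.length; i++) {
                numRows = (numRows * rows[i]) / 2; // T(R X S) = T(R) * T(S) / 2, folded left to right
                avgRowSize += avg[i];              // joined row carries the columns of both sides
            }
            // 100*50/2 = 2500, then 2500*20/2 = 25000 rows; 25000 * (40+30+10) = 2000000 bytes
            System.out.println(numRows + " rows, " + (numRows * avgRowSize) + " bytes");
        }
    }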
@@ -698,7 +722,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } } + try { if (allSatisfyPreCondition) { + // statistics object that is combination of statistics from all relations involved in JOIN Statistics stats = new Statistics(); long prodRows = 1; @@ -744,7 +770,6 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // since new statistics is derived from all relations involved in JOIN, // we need to update the state information accordingly - stats.updateBasicStatsState(parentStats.getBasicStatsState()); stats.updateColumnStatsState(parentStats.getColumnStatsState()); } @@ -812,6 +837,28 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, stats.setNumRows(newRowCount); stats.setDataSize(StatsUtils.getDataSizeFromColumnStats(newRowCount, outColStats)); jop.setStatistics(stats); + } else { + + // worst case, when no column statistics are available + if (parents.size() > 1) { + Statistics wcStats = new Statistics(); + Statistics stp1 = parents.get(0).getStatistics(); + long numRows = stp1.getNumRows(); + long avgRowSize = stp1.getAvgRowSize(); + for (int i = 1; i < parents.size(); i++) { + stp1 = parents.get(i).getStatistics(); + numRows = (numRows * stp1.getNumRows()) / 2; + avgRowSize += stp1.getAvgRowSize(); + } + wcStats.setNumRows(numRows); + wcStats.setDataSize(numRows * avgRowSize); + jop.setStatistics(wcStats); + } else { + jop.setStatistics(parents.get(0).getStatistics().clone()); + } + } + } catch (CloneNotSupportedException e) { + throw new SemanticException(ErrorMsg.STATISTICS_CLONING_FAILED.getMsg()); } } return null; @@ -857,22 +904,31 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Statistics parentStats = parent.getStatistics(); try { + long limit = -1; + limit = lop.getConf().getLimit(); + if (satisfyPrecondition(parentStats)) { Statistics stats = parentStats.clone(); - long limit = -1; - limit = lop.getConf().getLimit(); - if (limit == -1) { - limit = lop.getConf().getLeastRows(); - } - // if limit is greate than available rows then do not update statistics + // if limit is greater than available rows then do not update statistics if (limit <= parentStats.getNumRows()) { updateStats(stats, limit); } lop.setStatistics(stats); } else { if (parentStats != null) { - lop.setStatistics(parentStats.clone()); + + // in the absence of column statistics, compute the data size based + // on the average row size + Statistics wcStats = parentStats.clone(); + if (limit <= parentStats.getNumRows()) { + long numRows = limit; + long avgRowSize = parentStats.getAvgRowSize(); + long dataSize = avgRowSize * limit; + wcStats.setNumRows(numRows); + wcStats.setDataSize(dataSize); + } + lop.setStatistics(wcStats); } } } catch (CloneNotSupportedException e) { @@ -909,7 +965,6 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Statistics parentStats = parent.getStatistics(); stats.addToNumRows(parentStats.getNumRows()); stats.addToDataSize(parentStats.getDataSize()); - stats.updateBasicStatsState(parentStats.getBasicStatsState()); stats.updateColumnStatsState(parentStats.getColumnStatsState()); stats.addToColumnStats(parentStats.getColumnStats()); op.getConf().setStatistics(stats); @@ -994,11 +1049,26 @@ static void updateStats(Statistics stats, long newNumRows) { } stats.setColumnStats(colStats); long newDataSize = StatsUtils.getDataSizeFromColumnStats(newNumRows, colStats); - stats.setDataSize(newDataSize); + if (newDataSize > 0) { + stats.setDataSize(newDataSize); + } } static 
boolean satisfyPrecondition(Statistics stats) { return stats != null && stats.getBasicStatsState().equals(Statistics.State.COMPLETE) && !stats.getColumnStatsState().equals(Statistics.State.NONE); } + + static Statistics getWorstCaseStats(Statistics stats) throws CloneNotSupportedException { + Statistics wcClone = stats.clone(); + long numRows = wcClone.getNumRows() / 2; + long dataSize = wcClone.getDataSize() / 2; + long avgRowSize = wcClone.getAvgRowSize(); + if (numRows > 0) { + dataSize = avgRowSize * numRows; + } + wcClone.setNumRows(numRows); + wcClone.setDataSize(dataSize); + return wcClone; + } } diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/Statistics.java ql/src/java/org/apache/hadoop/hive/ql/plan/Statistics.java index a16c8ff..baa0b46 100644 --- ql/src/java/org/apache/hadoop/hive/ql/plan/Statistics.java +++ ql/src/java/org/apache/hadoop/hive/ql/plan/Statistics.java @@ -49,8 +49,8 @@ public Statistics() { } public Statistics(long nr, long ds) { - this.numRows = nr; - this.dataSize = ds; + this.setNumRows(nr); + this.setDataSize(ds); this.basicStatsState = State.NONE; this.columnStats = null; this.columnStatsState = State.NONE; @@ -62,6 +62,7 @@ public long getNumRows() { public void setNumRows(long numRows) { this.numRows = numRows; + updateBasicStatsState(); } public long getDataSize() { @@ -70,6 +71,17 @@ public long getDataSize() { public void setDataSize(long dataSize) { this.dataSize = dataSize; + updateBasicStatsState(); + } + + private void updateBasicStatsState() { + if (numRows <= 0 && dataSize <= 0) { + this.basicStatsState = State.NONE; + } else if (numRows <= 0 || dataSize <= 0) { + this.basicStatsState = State.PARTIAL; + } else { + this.basicStatsState = State.COMPLETE; + } } public State getBasicStatsState() { @@ -120,10 +132,12 @@ public Statistics clone() throws CloneNotSupportedException { public void addToNumRows(long nr) { numRows += nr; + updateBasicStatsState(); } public void addToDataSize(long rds) { dataSize += rds; + updateBasicStatsState(); } public void setColumnStats(Map colStats) { @@ -162,37 +176,14 @@ public void addToColumnStats(List colStats) { } } - // newState + // newState // ----------------------------------------- - // basicStatsState | COMPLETE PARTIAL NONE | + // columnStatsState | COMPLETE PARTIAL NONE | // |________________________________________| // COMPLETE | COMPLETE PARTIAL PARTIAL | // PARTIAL | PARTIAL PARTIAL PARTIAL | // NONE | COMPLETE PARTIAL NONE | // ----------------------------------------- - public void updateBasicStatsState(State newState) { - if (newState.equals(State.PARTIAL)) { - basicStatsState = State.PARTIAL; - } - - if (newState.equals(State.NONE)) { - if (basicStatsState.equals(State.NONE)) { - basicStatsState = State.NONE; - } else { - basicStatsState = State.PARTIAL; - } - } - - if (newState.equals(State.COMPLETE)) { - if (basicStatsState.equals(State.PARTIAL)) { - basicStatsState = State.PARTIAL; - } else { - basicStatsState = State.COMPLETE; - } - } - } - - // similar to the table above for basic stats public void updateColumnStatsState(State newState) { if (newState.equals(State.PARTIAL)) { columnStatsState = State.PARTIAL; @@ -216,11 +207,11 @@ public void updateColumnStatsState(State newState) { } public long getAvgRowSize() { - if (basicStatsState.equals(State.COMPLETE) && numRows != 0) { + if (numRows != 0) { return dataSize / numRows; } - return 0; + return dataSize; } public ColStatistics getColumnStatisticsFromFQColName(String fqColName) { diff --git 
ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index 24e7b61..effdddb 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; import org.apache.hadoop.hive.ql.plan.Statistics; +import org.apache.hadoop.hive.ql.plan.Statistics.State; import org.apache.hadoop.hive.ql.util.JavaDataModel; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector; @@ -98,22 +99,6 @@ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList pa } } - // if basic stats are not available then return - if (nr <= 0 && rds <= 0) { - stats.setBasicStatsState(Statistics.State.NONE); - return stats; - } - - // if any basic stats is missing, mark it as partial stats - if (nr <= 0 || rds <= 0) { - stats.setBasicStatsState(Statistics.State.PARTIAL); - } - - // if both are available then we have complete basic stats - if (nr > 0 && rds > 0) { - stats.setBasicStatsState(Statistics.State.COMPLETE); - } - // number of rows -1 means that statistics from metastore is not reliable if (nr <= 0) { nr = 0; @@ -177,19 +162,6 @@ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList pa rds = getSumIgnoreNegatives(dataSizes); } - // basic stats - if (nr <= 0 && rds <= 0) { - stats.updateBasicStatsState(Statistics.State.NONE); - } else if (nr <= 0 || rds <= 0) { - stats.updateBasicStatsState(Statistics.State.PARTIAL); - } else { - if (containsNonPositives(rowCounts) || containsNonPositives(dataSizes)) { - stats.updateBasicStatsState(Statistics.State.PARTIAL); - } else { - stats.updateBasicStatsState(Statistics.State.COMPLETE); - } - } - // number of rows -1 means that statistics from metastore is not reliable if (nr <= 0) { nr = 0; @@ -197,6 +169,11 @@ stats.addToNumRows(nr); stats.addToDataSize(rds); + // if at least one partition does not contain a row count then mark basic stats state as PARTIAL + if (containsNonPositives(rowCounts)) { + stats.setBasicStatsState(State.PARTIAL); + } + // column stats for (Partition part : partList.getNotDeniedPartns()) { List colStats = getPartitionColumnStats(table, part, schema, neededColumns); @@ -219,7 +196,6 @@ } return stats; - } /** diff --git ql/src/test/results/clientpositive/alter_partition_coltype.q.out ql/src/test/results/clientpositive/alter_partition_coltype.q.out index bcfe175..d5c16cd 100644 --- ql/src/test/results/clientpositive/alter_partition_coltype.q.out +++ ql/src/test/results/clientpositive/alter_partition_coltype.q.out @@ -494,10 +494,10 @@ STAGE PLANS: expr: ((ts = 3.0) and (dt = 10)) type: boolean Statistics: - numRows: 75 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 75 dataSize: 573 basicStatsState: COMPLETE colStatsState: COMPLETE Select Operator Statistics: - numRows: 75 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 75 dataSize: 573 basicStatsState: COMPLETE colStatsState: COMPLETE Group By Operator aggregations: expr: count() diff --git ql/src/test/results/clientpositive/annotate_stats_filter.q.out ql/src/test/results/clientpositive/annotate_stats_filter.q.out index 
7c1ebd3..205fa2d 100644 --- ql/src/test/results/clientpositive/annotate_stats_filter.q.out +++ ql/src/test/results/clientpositive/annotate_stats_filter.q.out @@ -121,7 +121,7 @@ STAGE PLANS: expr: (state = 'OH') type: boolean Statistics: - numRows: 8 dataSize: 796 basicStatsState: COMPLETE colStatsState: NONE + numRows: 4 dataSize: 396 basicStatsState: COMPLETE colStatsState: NONE Select Operator expressions: expr: state @@ -134,14 +134,14 @@ STAGE PLANS: type: int outputColumnNames: _col0, _col1, _col2, _col3 Statistics: - numRows: 8 dataSize: 796 basicStatsState: COMPLETE colStatsState: NONE + numRows: 4 dataSize: 396 basicStatsState: COMPLETE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 Statistics: - numRows: 8 dataSize: 796 basicStatsState: COMPLETE colStatsState: NONE + numRows: 4 dataSize: 396 basicStatsState: COMPLETE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -1257,7 +1257,7 @@ STAGE PLANS: expr: (not true) type: boolean Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 796 basicStatsState: PARTIAL colStatsState: COMPLETE Select Operator expressions: expr: state @@ -1270,14 +1270,14 @@ STAGE PLANS: type: int outputColumnNames: _col0, _col1, _col2, _col3 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 796 basicStatsState: PARTIAL colStatsState: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 796 basicStatsState: PARTIAL colStatsState: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/annotate_stats_join.q.out ql/src/test/results/clientpositive/annotate_stats_join.q.out index f705f31..fe1c3b8 100644 --- ql/src/test/results/clientpositive/annotate_stats_join.q.out +++ ql/src/test/results/clientpositive/annotate_stats_join.q.out @@ -1456,7 +1456,7 @@ STAGE PLANS: handleSkewJoin: false outputColumnNames: _col0, _col1, _col4, _col5 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: COMPLETE Select Operator expressions: expr: _col0 @@ -1469,14 +1469,14 @@ STAGE PLANS: type: string outputColumnNames: _col0, _col1, _col2, _col3 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -1761,7 +1761,7 @@ STAGE PLANS: handleSkewJoin: false outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col10, _col11 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: COMPLETE Select Operator expressions: expr: _col0 @@ -1782,14 +1782,14 @@ STAGE PLANS: type: int 
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/annotate_stats_limit.q.out ql/src/test/results/clientpositive/annotate_stats_limit.q.out index c6a446c..265e708 100644 --- ql/src/test/results/clientpositive/annotate_stats_limit.q.out +++ ql/src/test/results/clientpositive/annotate_stats_limit.q.out @@ -231,7 +231,7 @@ STAGE PLANS: numRows: 8 dataSize: 796 basicStatsState: COMPLETE colStatsState: COMPLETE Limit Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 796 basicStatsState: PARTIAL colStatsState: COMPLETE ListSink diff --git ql/src/test/results/clientpositive/annotate_stats_part.q.out ql/src/test/results/clientpositive/annotate_stats_part.q.out index 914a114..333b8d6 100644 --- ql/src/test/results/clientpositive/annotate_stats_part.q.out +++ ql/src/test/results/clientpositive/annotate_stats_part.q.out @@ -1555,7 +1555,7 @@ STAGE PLANS: expr: (year <> 2001) type: boolean Statistics: - numRows: 1 dataSize: 325 basicStatsState: COMPLETE colStatsState: NONE + numRows: 0 dataSize: 162 basicStatsState: PARTIAL colStatsState: NONE Select Operator expressions: expr: state @@ -1564,14 +1564,14 @@ STAGE PLANS: type: int outputColumnNames: _col0, _col1 Statistics: - numRows: 1 dataSize: 325 basicStatsState: COMPLETE colStatsState: NONE + numRows: 0 dataSize: 162 basicStatsState: PARTIAL colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 Statistics: - numRows: 1 dataSize: 325 basicStatsState: COMPLETE colStatsState: NONE + numRows: 0 dataSize: 162 basicStatsState: PARTIAL colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/annotate_stats_select.q.out ql/src/test/results/clientpositive/annotate_stats_select.q.out index 7a1e9cc..1f1056d 100644 --- ql/src/test/results/clientpositive/annotate_stats_select.q.out +++ ql/src/test/results/clientpositive/annotate_stats_select.q.out @@ -932,14 +932,14 @@ STAGE PLANS: type: string outputColumnNames: _col0 Statistics: - numRows: 2 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 2 dataSize: 0 basicStatsState: PARTIAL colStatsState: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 Statistics: - numRows: 2 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 2 dataSize: 0 basicStatsState: PARTIAL colStatsState: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -4555,21 +4555,21 @@ STAGE PLANS: expr: (not bo1) type: boolean Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 1514 basicStatsState: PARTIAL colStatsState: COMPLETE Select Operator expressions: expr: bo1 type: boolean 
outputColumnNames: _col0 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 1514 basicStatsState: PARTIAL colStatsState: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 Statistics: - numRows: 0 dataSize: 0 basicStatsState: COMPLETE colStatsState: COMPLETE + numRows: 0 dataSize: 1514 basicStatsState: PARTIAL colStatsState: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/auto_join_reordering_values.q.out ql/src/test/results/clientpositive/auto_join_reordering_values.q.out index 19c446b..68b77da 100644 --- ql/src/test/results/clientpositive/auto_join_reordering_values.q.out +++ ql/src/test/results/clientpositive/auto_join_reordering_values.q.out @@ -209,6 +209,8 @@ STAGE PLANS: 1 {VALUE._col1} handleSkewJoin: false outputColumnNames: _col0, _col3, _col4, _col8 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 @@ -241,6 +243,8 @@ STAGE PLANS: Map-reduce partition columns: expr: _col0 type: int + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: 0 value expressions: expr: _col8 @@ -347,6 +351,8 @@ STAGE PLANS: 1 {VALUE._col0} handleSkewJoin: false outputColumnNames: _col1, _col10, _col11, _col14 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 @@ -379,6 +385,8 @@ STAGE PLANS: Map-reduce partition columns: expr: _col10 type: int + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: 0 value expressions: expr: _col14 @@ -482,6 +490,8 @@ STAGE PLANS: 1 handleSkewJoin: false outputColumnNames: _col1, _col7, _col18 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 @@ -514,6 +524,8 @@ STAGE PLANS: Map-reduce partition columns: expr: _col18 type: int + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: 0 value expressions: expr: _col7 @@ -615,6 +627,8 @@ STAGE PLANS: 1 handleSkewJoin: false outputColumnNames: _col1, _col7 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col1 @@ -622,12 +636,18 @@ STAGE PLANS: expr: _col7 type: int outputColumnNames: _col0, _col1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Limit + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out index ad61ec5..f171708 100644 --- ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out +++ ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out @@ -175,14 +175,20 @@ STAGE PLANS: 1 [Column[key]] Position of Big Table: 1 Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: 
false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -332,16 +338,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -362,6 +374,7 @@ STAGE PLANS: Fetch Operator limit: -1 + PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@bucket_big @@ -487,14 +500,20 @@ STAGE PLANS: 1 [Column[key]] Position of Big Table: 1 Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -644,16 +663,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -674,6 +699,7 @@ STAGE PLANS: Fetch Operator limit: -1 + PREHOOK: query: select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@bucket_big @@ -798,16 +824,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -914,16 +948,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: 
+ numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out index d2efff4..8501e71 100644 --- ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out +++ ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out @@ -346,14 +346,20 @@ STAGE PLANS: 1 [] Position of Big Table: 0 Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -550,16 +556,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucket_map_join_1.q.out ql/src/test/results/clientpositive/bucket_map_join_1.q.out index 90667ac..04a4ede 100644 --- ql/src/test/results/clientpositive/bucket_map_join_1.q.out +++ ql/src/test/results/clientpositive/bucket_map_join_1.q.out @@ -98,15 +98,23 @@ STAGE PLANS: 0 [Column[key], Column[value]] 1 [Column[key], Column[value]] Position of Big Table: 0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -172,16 +180,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucket_map_join_2.q.out ql/src/test/results/clientpositive/bucket_map_join_2.q.out index a02cc02..5b1b139 100644 --- ql/src/test/results/clientpositive/bucket_map_join_2.q.out +++ ql/src/test/results/clientpositive/bucket_map_join_2.q.out @@ -98,15 +98,23 @@ STAGE PLANS: 0 [Column[key], Column[value]] 1 [Column[key], Column[value]] Position of Big 
Table: 0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -172,16 +180,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketcontext_1.q.out ql/src/test/results/clientpositive/bucketcontext_1.q.out index 9261999..cbf36b4 100644 --- ql/src/test/results/clientpositive/bucketcontext_1.q.out +++ ql/src/test/results/clientpositive/bucketcontext_1.q.out @@ -179,16 +179,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -297,16 +305,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketcontext_2.q.out ql/src/test/results/clientpositive/bucketcontext_2.q.out index daa9d5d..2ddf829 100644 --- ql/src/test/results/clientpositive/bucketcontext_2.q.out +++ ql/src/test/results/clientpositive/bucketcontext_2.q.out @@ -167,16 +167,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: 
NONE tag: -1 value expressions: expr: _col0 @@ -285,16 +293,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketcontext_3.q.out ql/src/test/results/clientpositive/bucketcontext_3.q.out index cac1e07..63f1b4f 100644 --- ql/src/test/results/clientpositive/bucketcontext_3.q.out +++ ql/src/test/results/clientpositive/bucketcontext_3.q.out @@ -209,16 +209,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -282,16 +290,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketcontext_4.q.out ql/src/test/results/clientpositive/bucketcontext_4.q.out index 4b1254d..efaf361 100644 --- ql/src/test/results/clientpositive/bucketcontext_4.q.out +++ ql/src/test/results/clientpositive/bucketcontext_4.q.out @@ -221,16 +221,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -294,16 +302,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### 
NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketcontext_5.q.out ql/src/test/results/clientpositive/bucketcontext_5.q.out index a493151..6633ba2 100644 --- ql/src/test/results/clientpositive/bucketcontext_5.q.out +++ ql/src/test/results/clientpositive/bucketcontext_5.q.out @@ -109,16 +109,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -184,16 +192,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketcontext_6.q.out ql/src/test/results/clientpositive/bucketcontext_6.q.out index 13fc155..747f4d5 100644 --- ql/src/test/results/clientpositive/bucketcontext_6.q.out +++ ql/src/test/results/clientpositive/bucketcontext_6.q.out @@ -123,16 +123,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -241,16 +249,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketcontext_7.q.out ql/src/test/results/clientpositive/bucketcontext_7.q.out index 8aa6fde..3c25078 100644 --- ql/src/test/results/clientpositive/bucketcontext_7.q.out +++ ql/src/test/results/clientpositive/bucketcontext_7.q.out 
@@ -234,16 +234,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -352,16 +360,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketcontext_8.q.out ql/src/test/results/clientpositive/bucketcontext_8.q.out index 21a3d5a..701b55e 100644 --- ql/src/test/results/clientpositive/bucketcontext_8.q.out +++ ql/src/test/results/clientpositive/bucketcontext_8.q.out @@ -234,16 +234,24 @@ STAGE PLANS: 0 [Column[key]] 1 [Column[key]] Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Group By Operator aggregations: expr: count() bucketGroup: false mode: hash outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Reduce Output Operator sort order: + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE tag: -1 value expressions: expr: _col0 @@ -352,16 +360,22 @@ STAGE PLANS: bucketGroup: false mode: mergepartial outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE Select Operator expressions: expr: _col0 type: bigint outputColumnNames: _col0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/bucketmapjoin4.q.out ql/src/test/results/clientpositive/bucketmapjoin4.q.out index 8f75272..81f6117 100644 --- ql/src/test/results/clientpositive/bucketmapjoin4.q.out +++ ql/src/test/results/clientpositive/bucketmapjoin4.q.out @@ -156,6 +156,8 @@ STAGE PLANS: 1 [Column[key]] outputColumnNames: _col0, _col1, _col5 Position of Big Table: 0 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator expressions: @@ -166,11 +168,15 @@ STAGE PLANS: expr: _col5 type: string outputColumnNames: _col0, _col1, _col2 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 1 #### 
A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -643,6 +649,8 @@ STAGE PLANS: 1 [Column[key]] outputColumnNames: _col0, _col1, _col5 Position of Big Table: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE BucketMapJoin: true Select Operator expressions: @@ -653,11 +661,15 @@ STAGE PLANS: expr: _col5 type: string outputColumnNames: _col0, _col1, _col2 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE File Output Operator compressed: false GlobalTableId: 1 #### A masked pattern was here #### NumFilesPerFileSink: 1 + Statistics: + numRows: 0 dataSize: 0 basicStatsState: NONE colStatsState: NONE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat
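The golden-file changes above all follow from the new derivation in Statistics.java: basicStatsState is no longer set explicitly but is recomputed from numRows and dataSize on every mutation. A self-contained sketch of that rule — the enum, method, and class names here are ours, not the patch's — reproducing the states seen in the updated plans:

    public class BasicStatsStateDemo {
        enum State { NONE, PARTIAL, COMPLETE }

        // Mirrors updateBasicStatsState() added to Statistics.java: both values
        // missing => NONE, exactly one missing => PARTIAL, both present => COMPLETE.
        static State stateOf(long numRows, long dataSize) {
            if (numRows <= 0 && dataSize <= 0) {
                return State.NONE;
            } else if (numRows <= 0 || dataSize <= 0) {
                return State.PARTIAL;
            }
            return State.COMPLETE;
        }

        public static void main(String[] args) {
            System.out.println(stateOf(0, 0));   // NONE, e.g. the bucketed map-join plans above
            System.out.println(stateOf(0, 796)); // PARTIAL, e.g. the LIMIT 0 plan in annotate_stats_limit.q.out
            System.out.println(stateOf(8, 796)); // COMPLETE
        }
    }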