diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 01e1d01..0aee530 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -1189,13 +1189,6 @@ "Average row size is computed from average column size of all columns in the row. In the absence\n" + "of column statistics and for variable length complex columns like map, the average number of\n" + "entries/values can be specified using this config."), - // to accurately compute statistics for GROUPBY map side parallelism needs to be known - HIVE_STATS_MAP_SIDE_PARALLELISM("hive.stats.map.parallelism", 1, - "Hive/Tez optimizer estimates the data size flowing through each of the operators.\n" + - "For GROUPBY operator, to accurately compute the data size map-side parallelism needs to\n" + - "be known. By default, this value is set to 1 since optimizer is not aware of the number of\n" + - "mappers during compile-time. This Hive config can be used to specify the number of mappers\n" + - "to be used for data size computation of GROUPBY operator."), // statistics annotation fetches stats for each partition, which can be expensive. turning // this off will result in basic sizes being fetched from namenode instead HIVE_STATS_FETCH_PARTITION_STATS("hive.stats.fetch.partition.stats", true, diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index 792d87f..50ffa56 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -18,22 +18,7 @@ package org.apache.hadoop.hive.ql.exec; -import java.io.Serializable; -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryMXBean; -import java.lang.reflect.Field; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; - import javolution.util.FastBitSet; - import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -69,6 +54,20 @@ import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.Text; +import java.io.Serializable; +import java.lang.management.ManagementFactory; +import java.lang.management.MemoryMXBean; +import java.lang.reflect.Field; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + /** * GroupBy operator implementation. */ @@ -442,10 +441,10 @@ private void computeMaxEntriesHashAggr(Configuration hconf) throws HiveException estimateRowSize(); } - private static final int javaObjectOverHead = 64; - private static final int javaHashEntryOverHead = 64; - private static final int javaSizePrimitiveType = 16; - private static final int javaSizeUnknownType = 256; + public static final int javaObjectOverHead = 64; + public static final int javaHashEntryOverHead = 64; + public static final int javaSizePrimitiveType = 16; + public static final int javaSizeUnknownType = 256; /** * The size of the element at position 'pos' is returned, if possible. 
If the diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index ac4b5a1..ee5a5c7 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -405,7 +405,7 @@ private EdgeProperty createEdgeProperty(TezEdgeProperty edgeProp, Configuration * from yarn. Falls back to Map-reduce's map size if tez * container size isn't set. */ - private Resource getContainerResource(Configuration conf) { + public static Resource getContainerResource(Configuration conf) { int memory = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) > 0 ? HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) : conf.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB); diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index 13d1f88..d445a4d 100644 --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -18,8 +18,14 @@ package org.apache.hadoop.hive.ql.optimizer.stats.annotation; -import com.google.common.collect.Lists; -import com.google.common.collect.Maps; +import java.lang.reflect.Field; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Stack; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; @@ -31,10 +37,12 @@ import org.apache.hadoop.hive.ql.exec.GroupByOperator; import org.apache.hadoop.hive.ql.exec.LimitOperator; import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.OperatorUtils; import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator; import org.apache.hadoop.hive.ql.exec.RowSchema; import org.apache.hadoop.hive.ql.exec.SelectOperator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; +import org.apache.hadoop.hive.ql.exec.tez.DagUtils; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; @@ -48,10 +56,12 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.Statistics; import org.apache.hadoop.hive.ql.stats.StatsUtils; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual; @@ -66,13 +76,10 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNull; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Stack; +import 
com.google.common.collect.Lists; +import com.google.common.collect.Maps; public class StatsRulesProcFactory { @@ -579,10 +586,16 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, GroupByOperator gop = (GroupByOperator) nd; Operator parent = gop.getParentOperators().get(0); Statistics parentStats = parent.getStatistics(); + // parent stats are not populated yet + if (parentStats == null) { + return null; + } + AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx; HiveConf conf = aspCtx.getConf(); - int mapSideParallelism = - HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_STATS_MAP_SIDE_PARALLELISM); + long maxSplitSize = HiveConf.getLongVar(conf, + HiveConf.ConfVars.MAPREDMAXSPLITSIZE); + int mapSideParallelism = 1; List aggDesc = gop.getConf().getAggregators(); Map colExprMap = gop.getColumnExprMap(); RowSchema rs = gop.getSchema(); @@ -590,27 +603,48 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, boolean mapSide = false; int multiplier = mapSideParallelism; long newNumRows; - long newDataSize; + List colStats = + StatsUtils.getColStatisticsFromExprMap(conf, parentStats, colExprMap, rs); + boolean mapSideAgg = false; // map side if (gop.getChildOperators().get(0) instanceof ReduceSinkOperator || gop.getChildOperators().get(0) instanceof AppMasterEventOperator) { - mapSide = true; + mapSide = true; + + // consider approximate map side parallelism to be table data size + // divided by max split size + TableScanOperator top = OperatorUtils.findSingleOperatorUpstream(gop, + TableScanOperator.class); + final long inputSize; + // if top is null then there are multiple parents (RS as well), hence + // lets use parent statistics to get data size. Also maxSplitSize should + // be updated to bytes per reducer (1GB default) + if (top == null) { + inputSize = parentStats.getDataSize(); + maxSplitSize = HiveConf.getLongVar(conf, + HiveConf.ConfVars.BYTESPERREDUCER); + } else { + inputSize = top.getConf().getStatistics().getDataSize(); + } + mapSideParallelism = (int) Math.ceil((double) inputSize / maxSplitSize); + multiplier = mapSideParallelism; // map-side grouping set present. if grouping set is present then // multiply the number of rows by number of elements in grouping set if (gop.getConf().isGroupingSetsPresent()) { multiplier *= gop.getConf().getListGroupingSets().size(); + } else { + // in the absence of grouping sets the number of rows will not increase + multiplier *= 1; } } try { if (satisfyPrecondition(parentStats)) { + mapSideAgg = checkMapSideAggregation(gop, colStats, conf); stats = parentStats.clone(); - - List colStats = - StatsUtils.getColStatisticsFromExprMap(conf, parentStats, colExprMap, rs); stats.setColumnStats(colStats); long dvProd = 1; @@ -639,58 +673,42 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, } } - // map side - if (mapSide) { - - // since we do not know if hash-aggregation will be enabled or disabled - // at runtime we will assume that map-side group by does not do any - // reduction.hence no group by rule will be applied - - // map-side grouping set present. 
if grouping set is present then - // multiply the number of rows by number of elements in grouping set - if (gop.getConf().isGroupingSetsPresent()) { - newNumRows = setMaxIfInvalid(multiplier * stats.getNumRows()); - newDataSize = setMaxIfInvalid(multiplier * stats.getDataSize()); - stats.setNumRows(newNumRows); - stats.setDataSize(newDataSize); - for (ColStatistics cs : colStats) { - if (cs != null) { - long oldNumNulls = cs.getNumNulls(); - long newNumNulls = multiplier * oldNumNulls; - cs.setNumNulls(newNumNulls); - } - } - } else { - - // map side no grouping set - newNumRows = stats.getNumRows() * multiplier; - updateStats(stats, newNumRows, true, gop); - } - } else { - - // reduce side + // apply GBY rule when map aggregation is enabled or if it is reduce + // side group by + newNumRows = setMaxIfInvalid(multiplier * stats.getNumRows()); + if (mapSideAgg || !mapSide) { newNumRows = applyGBYRule(stats.getNumRows(), dvProd); - updateStats(stats, newNumRows, true, gop); } + + // when there is no grouping set, the number of rows should not + // exceed total number of rows + if (!gop.getConf().isGroupingSetsPresent()) { + newNumRows = Math.min(stats.getNumRows(), newNumRows); + } + + // update stats, but don't update NDV as it will not change + updateStats(stats, newNumRows, true, gop, false); } else { if (parentStats != null) { stats = parentStats.clone(); - // worst case, in the absence of column statistics assume half the rows are emitted + // worst case, in the absence of column statistics assume hash + // aggregation is disabled and reduce side only reduces half the + // number of rows if (mapSide) { - - // map side newNumRows = multiplier * stats.getNumRows(); - newDataSize = multiplier * stats.getDataSize(); - stats.setNumRows(newNumRows); - stats.setDataSize(newDataSize); } else { - - // reduce side newNumRows = parentStats.getNumRows() / 2; - updateStats(stats, newNumRows, false, gop); } + + // when there is no grouping set, the number of rows should not + // exceed total number of rows + if (!gop.getConf().isGroupingSetsPresent()) { + newNumRows = Math.min(stats.getNumRows(), newNumRows); + } + + updateStats(stats, newNumRows, false, gop); } } @@ -747,6 +765,107 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } + /** + * This method does not take into account many configs used at runtime to + * disable hash aggregation like HIVEMAPAGGRHASHMINREDUCTION. This method + * roughly estimates the number of rows and size of each row to see if it + * can fit in hashtable for aggregation. + * @param gop - group by operator + * @param colStats - column stats for key columns + * @param conf - hive conf + * @return + */ + private boolean checkMapSideAggregation(GroupByOperator gop, + List colStats, HiveConf conf) { + + List aggDesc = gop.getConf().getAggregators(); + GroupByDesc desc = gop.getConf(); + GroupByDesc.Mode mode = desc.getMode(); + + if (mode.equals(GroupByDesc.Mode.HASH)) { + float hashAggMem = conf.getFloatVar( + HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float hashAggMaxThreshold = conf.getFloatVar( + HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + + // get memory for container. May be use mapreduce.map.java.opts instead? 
+ long totalMemory = + DagUtils.getContainerResource(conf).getMemory() * 1000L * 1000L; + long maxMemHashAgg = Math + .round(totalMemory * hashAggMem * hashAggMaxThreshold); + + // estimated number of rows will be product of NDVs + long numEstimatedRows = 1; + + // estimate size of key from column statistics + long avgKeySize = 0; + for (ColStatistics cs : colStats) { + if (cs != null) { + numEstimatedRows *= cs.getCountDistint(); + avgKeySize += Math.ceil(cs.getAvgColLen()); + } + } + + // average value size will be sum of all sizes of aggregation buffers + long avgValSize = 0; + // go over all aggregation buffers and see they implement estimable + // interface if so they aggregate the size of the aggregation buffer + GenericUDAFEvaluator[] aggregationEvaluators; + aggregationEvaluators = new GenericUDAFEvaluator[aggDesc.size()]; + + // get aggregation evaluators + for (int i = 0; i < aggregationEvaluators.length; i++) { + AggregationDesc agg = aggDesc.get(i); + aggregationEvaluators[i] = agg.getGenericUDAFEvaluator(); + } + + // estimate size of aggregation buffer + for (int i = 0; i < aggregationEvaluators.length; i++) { + + // each evaluator has constant java object overhead + avgValSize += gop.javaObjectOverHead; + GenericUDAFEvaluator.AggregationBuffer agg = null; + try { + agg = aggregationEvaluators[i].getNewAggregationBuffer(); + } catch (HiveException e) { + // in case of exception assume unknown type (256 bytes) + avgValSize += gop.javaSizeUnknownType; + } + + // aggregate size from aggregation buffers + if (agg != null) { + if (GenericUDAFEvaluator.isEstimable(agg)) { + avgValSize += ((GenericUDAFEvaluator.AbstractAggregationBuffer) agg) + .estimate(); + } else { + // if the aggregation buffer is not estimable then get all the + // declared fields and compute the sizes from field types + Field[] fArr = ObjectInspectorUtils + .getDeclaredNonStaticFields(agg.getClass()); + for (Field f : fArr) { + long avgSize = StatsUtils + .getAvgColLenOfFixedLengthTypes(f.getType().getName()); + avgValSize += avgSize == 0 ? gop.javaSizeUnknownType : avgSize; + } + } + } + } + + // total size of each hash entry + long hashEntrySize = gop.javaHashEntryOverHead + avgKeySize + avgValSize; + + // estimated hash table size + long estHashTableSize = numEstimatedRows * hashEntrySize; + + if (estHashTableSize < maxMemHashAgg) { + return true; + } + } + + // worst-case, hash aggregation disabled + return false; + } + private long applyGBYRule(long numRows, long dvProd) { long newNumRows = numRows; @@ -1378,6 +1497,7 @@ public static NodeProcessor getDefaultRule() { return new DefaultStatsRule(); } + /** * Update the basic statistics of the statistics object based on the row number * @param stats @@ -1389,6 +1509,12 @@ public static NodeProcessor getDefaultRule() { */ static void updateStats(Statistics stats, long newNumRows, boolean useColStats, Operator op) { + updateStats(stats, newNumRows, useColStats, op, true); + } + + static void updateStats(Statistics stats, long newNumRows, + boolean useColStats, Operator op, + boolean updateNDV) { if (newNumRows <= 0) { LOG.info("STATS-" + op.toString() + ": Overflow in number of rows." @@ -1406,17 +1532,19 @@ static void updateStats(Statistics stats, long newNumRows, long oldNumNulls = cs.getNumNulls(); long oldDV = cs.getCountDistint(); long newNumNulls = Math.round(ratio * oldNumNulls); - long newDV = oldDV; - - // if ratio is greater than 1, then number of rows increases. 
This can happen - // when some operators like GROUPBY duplicates the input rows in which case - // number of distincts should not change. Update the distinct count only when - // the output number of rows is less than input number of rows. - if (ratio <= 1.0) { - newDV = (long) Math.ceil(ratio * oldDV); - } cs.setNumNulls(newNumNulls); - cs.setCountDistint(newDV); + if (updateNDV) { + long newDV = oldDV; + + // if ratio is greater than 1, then number of rows increases. This can happen + // when some operators like GROUPBY duplicates the input rows in which case + // number of distincts should not change. Update the distinct count only when + // the output number of rows is less than input number of rows. + if (ratio <= 1.0) { + newDV = (long) Math.ceil(ratio * oldDV); + } + cs.setCountDistint(newDV); + } } stats.setColumnStats(colStats); long newDataSize = StatsUtils.getDataSizeFromColumnStats(newNumRows, colStats); diff --git ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index ce944bc..2b3ce4c 100644 --- ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -767,7 +767,8 @@ public static long getAvgColLenOfFixedLengthTypes(String colType) { || colType.equalsIgnoreCase(serdeConstants.FLOAT_TYPE_NAME)) { return JavaDataModel.get().primitive1(); } else if (colType.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME) - || colType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME)) { + || colType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME) + || colType.equalsIgnoreCase("long")) { return JavaDataModel.get().primitive2(); } else if (colType.equalsIgnoreCase(serdeConstants.TIMESTAMP_TYPE_NAME)) { return JavaDataModel.get().lengthOfTimestamp(); @@ -796,7 +797,8 @@ public static long getSizeOfPrimitiveTypeArraysFromType(String colType, int leng return JavaDataModel.get().lengthForIntArrayOfSize(length); } else if (colType.equalsIgnoreCase(serdeConstants.DOUBLE_TYPE_NAME)) { return JavaDataModel.get().lengthForDoubleArrayOfSize(length); - } else if (colType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME)) { + } else if (colType.equalsIgnoreCase(serdeConstants.BIGINT_TYPE_NAME) + || colType.equalsIgnoreCase("long")) { return JavaDataModel.get().lengthForLongArrayOfSize(length); } else if (colType.equalsIgnoreCase(serdeConstants.BINARY_TYPE_NAME)) { return JavaDataModel.get().lengthForByteArrayOfSize(length); @@ -892,7 +894,7 @@ public static long getWritableSize(ObjectInspector oi, Object value) { Statistics parentStats, Map colExprMap, RowSchema rowSchema) { List cs = Lists.newArrayList(); - if (colExprMap != null) { + if (colExprMap != null && rowSchema != null) { for (ColumnInfo ci : rowSchema.getSignature()) { String outColName = ci.getInternalName(); outColName = StatsUtils.stripPrefixFromColumnName(outColName); diff --git ql/src/test/queries/clientpositive/annotate_stats_groupby.q ql/src/test/queries/clientpositive/annotate_stats_groupby.q index 1c0829d..d975d3b 100644 --- ql/src/test/queries/clientpositive/annotate_stats_groupby.q +++ ql/src/test/queries/clientpositive/annotate_stats_groupby.q @@ -1,4 +1,7 @@ set hive.stats.fetch.column.stats=true; +set hive.map.aggr.hash.percentmemory=0.0f; + +-- hash aggregation is disabled create table if not exists loc_staging ( state string, @@ -58,7 +61,8 @@ explain select state,locid from loc_orc group by state,locid grouping sets((stat -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 explain 
select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()); -set hive.stats.map.parallelism=10; +set mapred.max.split.size=80; +-- map-side parallelism will be 10 -- map-side GBY: numRows: 80 (map-side will not do any reduction) -- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2) @@ -67,8 +71,8 @@ explain select year from loc_orc group by year; -- map-side GBY numRows: 320 reduce-side GBY numRows: 42 Reason: numDistinct of state and locid are 6,7 resp. numRows = min(320/2, 6*7) explain select state,locid from loc_orc group by state,locid with cube; +set mapred.max.split.size=1000; set hive.stats.fetch.column.stats=false; -set hive.stats.map.parallelism=1; -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 explain select state,locid from loc_orc group by state,locid with cube; @@ -88,7 +92,7 @@ explain select state,locid from loc_orc group by state,locid grouping sets((stat -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()); -set hive.stats.map.parallelism=10; +set mapred.max.split.size=80; -- map-side GBY: numRows: 80 (map-side will not do any reduction) -- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2) diff --git ql/src/test/queries/clientpositive/annotate_stats_groupby_hashagg.q ql/src/test/queries/clientpositive/annotate_stats_groupby_hashagg.q new file mode 100644 index 0000000..b770261 --- /dev/null +++ ql/src/test/queries/clientpositive/annotate_stats_groupby_hashagg.q @@ -0,0 +1,102 @@ +set hive.stats.fetch.column.stats=true; +set hive.map.aggr.hash.percentmemory=0.5f; + +-- hash aggregation is disabled + +create table if not exists loc_staging ( + state string, + locid int, + zip bigint, + year int +) row format delimited fields terminated by '|' stored as textfile; + +create table loc_orc like loc_staging; +alter table loc_orc set fileformat orc; + +load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging; + +insert overwrite table loc_orc select * from loc_staging; + +-- numRows: 8 rawDataSize: 796 +explain select * from loc_orc; + +-- partial column stats +analyze table loc_orc compute statistics for columns state; + +-- inner group by: map - numRows: 8 reduce - numRows: 4 +-- outer group by: map - numRows: 4 reduce numRows: 2 +explain select a, c, min(b) +from ( select state as a, locid as b, count(*) as c + from loc_orc + group by state,locid + ) sq1 +group by a,c; + +analyze table loc_orc compute statistics for columns state,locid,zip,year; + +-- only one distinct value in year column + 1 NULL value +-- map-side GBY: numRows: 2 +-- reduce-side GBY: numRows: 1 +explain select year from loc_orc group by year; + +-- map-side GBY: numRows: 4 +-- reduce-side GBY: numRows: 2 +explain select state,locid from loc_orc group by state,locid; + +-- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with cube; + +-- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with rollup; + +-- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state)); + +-- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)); + +-- map-side GBY numRows: 4 reduce-side 
GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()); + +-- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()); + +set mapred.max.split.size=80; + +-- map-side GBY: numRows: 2 +-- reduce-side GBY: numRows: 1 +explain select year from loc_orc group by year; + +-- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with cube; + +set mapred.max.split.size=1000; +set hive.stats.fetch.column.stats=false; + +-- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +explain select state,locid from loc_orc group by state,locid with cube; + +-- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +explain select state,locid from loc_orc group by state,locid with rollup; + +-- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +explain select state,locid from loc_orc group by state,locid grouping sets((state)); + +-- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)); + +-- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()); + +-- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()); + +set mapred.max.split.size=80; + +-- map-side GBY: numRows: 80 +-- reduce-side GBY: numRows: 40 +explain select year from loc_orc group by year; + +-- map-side GBY numRows: 320 reduce-side GBY numRows: 160 +explain select state,locid from loc_orc group by state,locid with cube; + diff --git ql/src/test/results/clientpositive/annotate_stats_groupby.q.out ql/src/test/results/clientpositive/annotate_stats_groupby.q.out index 871c4217..d6712ea 100644 --- ql/src/test/results/clientpositive/annotate_stats_groupby.q.out +++ ql/src/test/results/clientpositive/annotate_stats_groupby.q.out @@ -1,4 +1,6 @@ -PREHOOK: query: create table if not exists loc_staging ( +PREHOOK: query: -- hash aggregation is disabled + +create table if not exists loc_staging ( state string, locid int, zip bigint, @@ -7,7 +9,9 @@ PREHOOK: query: create table if not exists loc_staging ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@loc_staging -POSTHOOK: query: create table if not exists loc_staging ( +POSTHOOK: query: -- hash aggregation is disabled + +create table if not exists loc_staging ( state string, locid int, zip bigint, @@ -339,12 +343,12 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) @@ -394,12 +398,12 @@ STAGE PLANS: keys: state 
(type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) @@ -449,12 +453,12 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 1400 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) @@ -504,12 +508,12 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 16 Data size: 2800 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) @@ -559,12 +563,12 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 24 Data size: 4200 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) @@ -614,12 +618,12 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data 
size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 32 Data size: 5600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) @@ -644,11 +648,15 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side GBY: numRows: 80 (map-side will not do any reduction) +PREHOOK: query: -- map-side parallelism will be 10 + +-- map-side GBY: numRows: 80 (map-side will not do any reduction) -- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2) explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- map-side GBY: numRows: 80 (map-side will not do any reduction) +POSTHOOK: query: -- map-side parallelism will be 10 + +-- map-side GBY: numRows: 80 (map-side will not do any reduction) -- reduce-side GBY: numRows: 2 Reason: numDistinct of year is 2. numRows = min(80/2, 2) explain select year from loc_orc group by year POSTHOOK: type: QUERY @@ -671,12 +679,12 @@ STAGE PLANS: keys: year (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 80 Data size: 280 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 80 Data size: 280 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int) @@ -726,12 +734,12 @@ STAGE PLANS: keys: state (type: string), locid (type: int), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 320 Data size: 56000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 320 Data size: 56000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) @@ -1113,25 +1121,25 @@ STAGE PLANS: keys: year (type: int) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 80 Data size: 7960 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 80 Data size: 7960 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: 
NONE + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false - Statistics: Num rows: 40 Data size: 3980 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/annotate_stats_groupby_hashagg.q.out ql/src/test/results/clientpositive/annotate_stats_groupby_hashagg.q.out new file mode 100644 index 0000000..c790d61 --- /dev/null +++ ql/src/test/results/clientpositive/annotate_stats_groupby_hashagg.q.out @@ -0,0 +1,1204 @@ +PREHOOK: query: -- hash aggregation is disabled + +create table if not exists loc_staging ( + state string, + locid int, + zip bigint, + year int +) row format delimited fields terminated by '|' stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@loc_staging +POSTHOOK: query: -- hash aggregation is disabled + +create table if not exists loc_staging ( + state string, + locid int, + zip bigint, + year int +) row format delimited fields terminated by '|' stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@loc_staging +PREHOOK: query: create table loc_orc like loc_staging +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@loc_orc +POSTHOOK: query: create table loc_orc like loc_staging +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@loc_orc +PREHOOK: query: alter table loc_orc set fileformat orc +PREHOOK: type: ALTERTABLE_FILEFORMAT +PREHOOK: Input: default@loc_orc +PREHOOK: Output: default@loc_orc +POSTHOOK: query: alter table loc_orc set fileformat orc +POSTHOOK: type: ALTERTABLE_FILEFORMAT +POSTHOOK: Input: default@loc_orc +POSTHOOK: Output: default@loc_orc +PREHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@loc_staging +POSTHOOK: query: load data local inpath '../../data/files/loc.txt' overwrite into table loc_staging +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@loc_staging +PREHOOK: query: insert overwrite table loc_orc select * from loc_staging +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_staging +PREHOOK: Output: default@loc_orc +POSTHOOK: query: insert overwrite table loc_orc select * from loc_staging +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_staging +POSTHOOK: Output: default@loc_orc +POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ] +PREHOOK: query: -- numRows: 8 rawDataSize: 796 +explain select * from loc_orc +PREHOOK: type: QUERY 
+POSTHOOK: query: -- numRows: 8 rawDataSize: 796 +explain select * from loc_orc +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), locid (type: int), zip (type: bigint), year (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + ListSink + +PREHOOK: query: -- partial column stats +analyze table loc_orc compute statistics for columns state +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc +#### A masked pattern was here #### +POSTHOOK: query: -- partial column stats +analyze table loc_orc compute statistics for columns state +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc +#### A masked pattern was here #### +PREHOOK: query: -- inner group by: map - numRows: 8 reduce - numRows: 4 +-- outer group by: map - numRows: 4 reduce numRows: 2 +explain select a, c, min(b) +from ( select state as a, locid as b, count(*) as c + from loc_orc + group by state,locid + ) sq1 +group by a,c +PREHOOK: type: QUERY +POSTHOOK: query: -- inner group by: map - numRows: 8 reduce - numRows: 4 +-- outer group by: map - numRows: 4 reduce numRows: 2 +explain select a, c, min(b) +from ( select state as a, locid as b, count(*) as c + from loc_orc + group by state,locid + ) sq1 +group by a,c +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: count() + keys: state (type: string), locid (type: int) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 376 Basic stats: COMPLETE Column stats: PARTIAL + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int) + Statistics: Num rows: 4 Data size: 376 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col2 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: bigint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 200 Basic stats: COMPLETE Column stats: PARTIAL + Group By Operator + aggregations: min(_col1) + keys: _col0 (type: string), _col2 (type: bigint) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: bigint) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint) + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: PARTIAL + value expressions: _col2 (type: int) + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: bigint) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: PARTIAL + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint), _col2 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: PARTIAL + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc compute statistics for columns state,locid,zip,year +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc +#### A masked pattern was here #### +PREHOOK: query: -- only one distinct value in year column + 1 NULL value +-- map-side GBY: numRows: 2 +-- reduce-side GBY: numRows: 1 +explain select year from loc_orc group by year +PREHOOK: type: QUERY +POSTHOOK: query: -- only one distinct value in year column + 1 NULL value +-- map-side GBY: numRows: 2 +-- reduce-side GBY: numRows: 1 +explain select year from loc_orc group by year +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: year (type: int) + outputColumnNames: year + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: year (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY: numRows: 4 +-- reduce-side GBY: numRows: 2 +explain select state,locid from loc_orc group by state,locid +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY: numRows: 4 +-- reduce-side GBY: numRows: 2 +explain select state,locid from loc_orc group by state,locid +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), locid (type: int) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 4 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int) + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int) + Statistics: Num rows: 4 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 4 Data 
size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 350 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with rollup +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with rollup +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 350 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state)) +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state)) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 
depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 350 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 350 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, 
_col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 350 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 
8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 350 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY: numRows: 2 +-- reduce-side GBY: numRows: 1 +explain select year from loc_orc group by year +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY: numRows: 2 +-- reduce-side GBY: numRows: 1 +explain select year from loc_orc group by year +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: year (type: int) + outputColumnNames: year + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: year (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY 
numRows: 4 reduce-side GBY numRows: 2 +explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 4 Data size: 700 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 350 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 180 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 16 Data size: 1592 Basic 
stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +explain select state,locid from loc_orc group by state,locid with rollup +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +explain select state,locid from loc_orc group by state,locid with rollup +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +explain select state,locid from loc_orc group by state,locid grouping sets((state)) +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 8 reduce-side GBY numRows: 4 +explain select state,locid from loc_orc group by state,locid grouping sets((state)) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + 
Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 16 reduce-side GBY numRows: 8 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + 
serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 24 reduce-side GBY numRows: 12 +explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 24 Data size: 2388 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 12 Data size: 1194 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 32 reduce-side GBY numRows: 16 +explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 
(type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 32 Data size: 3184 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 16 Data size: 1592 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY: numRows: 80 +-- reduce-side GBY: numRows: 40 +explain select year from loc_orc group by year +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY: numRows: 80 +-- reduce-side GBY: numRows: 40 +explain select year from loc_orc group by year +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: year (type: int) + outputColumnNames: year + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: year (type: int) + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 4 Data size: 398 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- map-side GBY numRows: 320 reduce-side GBY numRows: 160 +explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: type: QUERY +POSTHOOK: query: -- map-side GBY numRows: 320 reduce-side GBY numRows: 160 +explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + 
Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: state, locid + Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: state (type: string), locid (type: int), '0' (type: string) + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) + sort order: +++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) + Statistics: Num rows: 320 Data size: 31840 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 160 Data size: 15920 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git ql/src/test/results/clientpositive/binarysortable_1.q.out ql/src/test/results/clientpositive/binarysortable_1.q.out index 7ff3fd4..e486466 100644 --- ql/src/test/results/clientpositive/binarysortable_1.q.out +++ ql/src/test/results/clientpositive/binarysortable_1.q.out @@ -56,12 +56,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 93 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 93 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: double) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/combine2.q.out ql/src/test/results/clientpositive/combine2.q.out index 831612d..7a6d328 100644 --- ql/src/test/results/clientpositive/combine2.q.out +++ ql/src/test/results/clientpositive/combine2.q.out @@ -674,12 +674,12 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 2000 Data size: 384000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 384000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -687,14 +687,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: 
Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 96000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 96000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1000 Data size: 192000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 96000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/groupby_cube1.q.out ql/src/test/results/clientpositive/groupby_cube1.q.out index 0f840d6..7b5d70a 100644 --- ql/src/test/results/clientpositive/groupby_cube1.q.out +++ ql/src/test/results/clientpositive/groupby_cube1.q.out @@ -44,12 +44,12 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -128,12 +128,12 @@ STAGE PLANS: keys: key (type: string), '0' (type: string), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Operator Tree: Group By Operator aggregations: count(DISTINCT KEY._col2:0._col0) @@ -200,12 +200,12 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -213,7 +213,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: 
PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -229,7 +229,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -308,12 +308,12 @@ STAGE PLANS: keys: key (type: string), '0' (type: string), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Operator Tree: Group By Operator aggregations: count(DISTINCT KEY._col2:0._col0) @@ -405,12 +405,12 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Select Operator expressions: key (type: string), val (type: string) @@ -421,7 +421,7 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -434,7 +434,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -450,7 +450,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -493,7 +493,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 
Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -501,7 +501,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -517,7 +517,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 120 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out index 75aaddc..7c88a67 100644 --- ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out +++ ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out @@ -43,12 +43,12 @@ STAGE PLANS: keys: a (type: string), b (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -56,7 +56,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -72,7 +72,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -149,12 +149,12 @@ STAGE PLANS: keys: a (type: string), b (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col2 (type: double) Reduce Operator Tree: Group By Operator @@ -162,7 +162,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: 
string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -178,7 +178,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 0 Data size: 144 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: double) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out index a1842c1..6ae6e55 100644 --- ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out +++ ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out @@ -62,12 +62,12 @@ STAGE PLANS: keys: a (type: string), b (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: struct), _col4 (type: bigint) Reduce Operator Tree: Group By Operator @@ -149,12 +149,12 @@ STAGE PLANS: keys: a (type: string), b (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 72 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col2 (type: struct), _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -162,7 +162,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), '0' (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -178,7 +178,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 0 Data size: 288 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: struct), _col4 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out 
index ce229d3..cf1515c 100644 --- ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out +++ ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out @@ -49,12 +49,12 @@ STAGE PLANS: keys: a (type: string), b (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -172,12 +172,12 @@ STAGE PLANS: keys: a (type: string), b (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/groupby_rollup1.q.out ql/src/test/results/clientpositive/groupby_rollup1.q.out index 827b081..bc77aea 100644 --- ql/src/test/results/clientpositive/groupby_rollup1.q.out +++ ql/src/test/results/clientpositive/groupby_rollup1.q.out @@ -44,12 +44,12 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: string) - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -122,12 +122,12 @@ STAGE PLANS: keys: key (type: string), '0' (type: string), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Operator Tree: Group By Operator aggregations: count(DISTINCT KEY._col2:0._col0) @@ -194,12 +194,12 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL 
Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -207,7 +207,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -223,7 +223,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -296,12 +296,12 @@ STAGE PLANS: keys: key (type: string), '0' (type: string), val (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Operator Tree: Group By Operator aggregations: count(DISTINCT KEY._col2:0._col0) @@ -393,12 +393,12 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Select Operator expressions: key (type: string), val (type: string) @@ -409,7 +409,7 @@ STAGE PLANS: keys: key (type: string), val (type: string), '0' (type: string) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -422,7 +422,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -438,7 +438,7 
@@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -481,7 +481,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator @@ -489,7 +489,7 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string), KEY._col2 (type: string) mode: partials outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false table: @@ -505,7 +505,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string) sort order: +++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 90 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col3 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/groupby_sort_11.q.out ql/src/test/results/clientpositive/groupby_sort_11.q.out index 311815a..0f809bc 100644 --- ql/src/test/results/clientpositive/groupby_sort_11.q.out +++ ql/src/test/results/clientpositive/groupby_sort_11.q.out @@ -396,11 +396,11 @@ STAGE PLANS: keys: 1 (type: int) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + - Statistics: Num rows: 10 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator aggregations: count(DISTINCT KEY._col0:0._col0) diff --git ql/src/test/results/clientpositive/groupby_sort_6.q.out ql/src/test/results/clientpositive/groupby_sort_6.q.out index cd1f979..67dee46 100644 --- ql/src/test/results/clientpositive/groupby_sort_6.q.out +++ ql/src/test/results/clientpositive/groupby_sort_6.q.out @@ -367,12 +367,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE tag: -1 value expressions: _col1 (type: bigint) auto parallelism: false diff --git ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out 
ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out index 781cac3..b878e1e 100644 --- ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out +++ ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out @@ -564,25 +564,25 @@ STAGE PLANS: keys: hr (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out index 8467df2..0e65f99 100644 --- ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out +++ ql/src/test/results/clientpositive/tez/dynamic_partition_pruning.q.out @@ -65,26 +65,26 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 2 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output 
format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1910,12 +1910,12 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 2 Reduce Operator Tree: Join Operator @@ -1924,9 +1924,9 @@ STAGE PLANS: condition expressions: 0 1 - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -1960,16 +1960,16 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Stage: Stage-0 Fetch Operator @@ -2931,9 +2931,9 @@ STAGE PLANS: condition expressions: 0 1 - Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -3129,21 +3129,21 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 outputColumnNames: _col2 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: string) outputColumnNames: _col2 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col2 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column 
stats: COMPLETE Reducer 5 Reduce Operator Tree: Group By Operator @@ -3292,12 +3292,12 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Map 8 Map Operator Tree: TableScan @@ -3312,12 +3312,12 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 11 Reduce Operator Tree: Group By Operator @@ -3419,14 +3419,14 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 outputColumnNames: _col0 - Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 84000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 84000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 84000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -4366,23 +4366,23 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Map Join Operator condition map: Inner Join 0 to 1 @@ -4394,9 +4394,9 @@ STAGE PLANS: 1 _col0 (type: string) input vertices: 0 Map 1 - Statistics: Num rows: 500 Data size: 0 Basic stats: 
PARTIAL Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -5016,21 +5016,21 @@ STAGE PLANS: outputColumnNames: _col2 input vertices: 1 Union 3 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: string) outputColumnNames: _col2 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col2 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Map 6 Map Operator Tree: TableScan diff --git ql/src/test/results/clientpositive/tez/metadataonly1.q.out ql/src/test/results/clientpositive/tez/metadataonly1.q.out index fa22920..6a18ec6 100644 --- ql/src/test/results/clientpositive/tez/metadataonly1.q.out +++ ql/src/test/results/clientpositive/tez/metadataonly1.q.out @@ -860,11 +860,11 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE Dynamic Partitioning Event Operator Target Input: a2 Partition key expr: ds - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE Target column: ds Target Vertex: Map 5 Reducer 3 diff --git ql/src/test/results/clientpositive/tez/union7.q.out ql/src/test/results/clientpositive/tez/union7.q.out index 43f678e..2971029 100644 --- ql/src/test/results/clientpositive/tez/union7.q.out +++ ql/src/test/results/clientpositive/tez/union7.q.out @@ -92,14 +92,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out 
ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out index 22053ff..0126033 100644 --- ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out +++ ql/src/test/results/clientpositive/tez/vectorized_dynamic_partition_pruning.q.out @@ -65,26 +65,26 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 2 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col0 (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1930,12 +1930,12 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 2 Reduce Operator Tree: Join Operator @@ -1944,9 +1944,9 @@ STAGE PLANS: condition expressions: 0 1 - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -1981,16 +1981,16 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + 
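The pattern running through these plan updates: the map-side Group By (mode: hash) estimate drops to half of its parent's row count (2000 scanned rows now yield 1000, 1000 yield 500), and the reduce-side merge (mode: mergepartial) halves that figure again. A minimal sketch of such a halving heuristic follows; the class, method, and parameter names are hypothetical, and the actual annotation rule is more involved (it can also weigh key cardinality and available memory), so read this as an illustration of the arithmetic visible in the plans, not the implementation.

public final class GroupByCardinalitySketch {

  private GroupByCardinalitySketch() {
  }

  // Map-side (mode: hash) Group By: assume partial aggregation removes roughly
  // half of the incoming rows, never estimating below one row.
  public static long estimateHashGroupByRows(long parentNumRows) {
    return Math.max(1, parentNumRows / 2);
  }

  // Reduce-side (mode: mergepartial/final) Group By: the partials are merged,
  // so the same reduction is applied once more on top of the map-side estimate.
  public static long estimateMergePartialRows(long mapSideRows) {
    return estimateHashGroupByRows(mapSideRows);
  }

  public static void main(String[] args) {
    long mapSide = estimateHashGroupByRows(2000);         // 1000, as in the hash Group By above
    long reduceSide = estimateMergePartialRows(mapSide);  // 500, as in the mergepartial stage
    System.out.println(mapSide + " -> " + reduceSide);
  }
}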
Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized Stage: Stage-0 @@ -2965,9 +2965,9 @@ STAGE PLANS: condition expressions: 0 1 - Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 2 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -3164,21 +3164,21 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 outputColumnNames: _col2 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: string) outputColumnNames: _col2 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col2 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reducer 5 Reduce Operator Tree: Group By Operator @@ -3328,12 +3328,12 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Map 8 Map Operator Tree: TableScan @@ -3348,12 +3348,12 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2000 Data size: 368000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 11 Reduce Operator Tree: Group By Operator @@ -3455,14 +3455,14 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 outputColumnNames: _col0 - Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 84000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE + 
Statistics: Num rows: 1000 Data size: 84000 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4000 Data size: 336000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1000 Data size: 84000 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -3559,6 +3559,8 @@ STAGE PLANS: keys: 0 ds (type: string) 1 ds (type: string) + input vertices: + 1 Map 3 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE @@ -3694,6 +3696,8 @@ STAGE PLANS: 0 ds (type: string) 1 ds (type: string) outputColumnNames: _col3 + input vertices: + 1 Map 4 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -3704,6 +3708,8 @@ STAGE PLANS: keys: 0 _col3 (type: string) 1 hr (type: string) + input vertices: + 1 Map 3 Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 2420 Data size: 25709 Basic stats: COMPLETE Column stats: NONE @@ -3867,6 +3873,8 @@ STAGE PLANS: keys: 0 ds (type: string), hr (type: string) 1 ds (type: string), hr (type: string) + input vertices: + 1 Map 3 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE @@ -4012,6 +4020,8 @@ STAGE PLANS: keys: 0 ds (type: string) 1 ds (type: string) + input vertices: + 1 Map 3 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE @@ -4122,6 +4132,8 @@ STAGE PLANS: keys: 0 UDFToDouble(hr) (type: double) 1 UDFToDouble(UDFToInteger((hr / 2))) (type: double) + input vertices: + 1 Map 3 Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -4242,6 +4254,8 @@ STAGE PLANS: keys: 0 (hr * 2) (type: double) 1 hr (type: double) + input vertices: + 1 Map 3 Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE @@ -4399,23 +4413,23 @@ STAGE PLANS: keys: ds (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1000 Data size: 184000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE Reducer 3 Reduce Operator Tree: Group By Operator keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic 
stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 46000 Basic stats: COMPLETE Column stats: COMPLETE Map Join Operator condition map: Inner Join 0 to 1 @@ -4425,9 +4439,11 @@ STAGE PLANS: keys: 0 ds (type: string) 1 _col0 (type: string) - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + input vertices: + 0 Map 1 + Statistics: Num rows: 250 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 250 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Group By Operator aggregations: count() mode: hash @@ -4526,6 +4542,8 @@ STAGE PLANS: 0 ds (type: string) 1 ds (type: string) outputColumnNames: _col8 + input vertices: + 1 Map 3 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: (_col8 = '2008-04-08') (type: boolean) @@ -4623,6 +4641,8 @@ STAGE PLANS: keys: 0 ds (type: string) 1 ds (type: string) + input vertices: + 1 Map 1 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE @@ -4788,6 +4808,8 @@ STAGE PLANS: 0 ds (type: string) 1 ds (type: string) outputColumnNames: _col3 + input vertices: + 1 Map 4 Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -4798,6 +4820,8 @@ STAGE PLANS: keys: 0 _col3 (type: string) 1 '11' (type: string) + input vertices: + 1 Map 3 Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 1210 Data size: 12854 Basic stats: COMPLETE Column stats: NONE @@ -5048,21 +5072,23 @@ STAGE PLANS: 0 ds (type: string) 1 _col0 (type: string) outputColumnNames: _col2 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + input vertices: + 1 Union 3 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: string) outputColumnNames: _col2 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: _col2 (type: string) mode: hash outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 368 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Map 6 Map Operator Tree: TableScan @@ -5266,6 +5292,8 @@ STAGE PLANS: keys: 0 ds (type: string), UDFToDouble(hr) (type: double) 1 ds (type: string), UDFToDouble(hr) (type: double) + input vertices: + 1 Map 3 Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE Select Operator Statistics: Num rows: 1100 Data size: 0 Basic stats: PARTIAL Column stats: NONE diff --git ql/src/test/results/clientpositive/udf_explode.q.out ql/src/test/results/clientpositive/udf_explode.q.out index e07a987..301b1b7 100644 --- ql/src/test/results/clientpositive/udf_explode.q.out +++ 
ql/src/test/results/clientpositive/udf_explode.q.out @@ -217,12 +217,12 @@ STAGE PLANS: keys: col (type: int) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 value expressions: _col1 (type: bigint) auto parallelism: false @@ -282,17 +282,17 @@ STAGE PLANS: keys: KEY._col0 (type: int) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat @@ -574,12 +574,12 @@ STAGE PLANS: keys: key (type: int), value (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 500 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE tag: -1 value expressions: _col2 (type: bigint) auto parallelism: false @@ -639,17 +639,17 @@ STAGE PLANS: keys: KEY._col0 (type: int), KEY._col1 (type: string) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 250 Data size: 4000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE #### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat diff --git ql/src/test/results/clientpositive/union11.q.out ql/src/test/results/clientpositive/union11.q.out index fc28d05..b624c7d 100644 --- 
ql/src/test/results/clientpositive/union11.q.out +++ ql/src/test/results/clientpositive/union11.q.out @@ -75,12 +75,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) TableScan Union @@ -94,12 +94,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) TableScan Union @@ -113,12 +113,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 3 Data size: 288 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/union14.q.out ql/src/test/results/clientpositive/union14.q.out index 84464a5..02821d7 100644 --- ql/src/test/results/clientpositive/union14.q.out +++ ql/src/test/results/clientpositive/union14.q.out @@ -77,12 +77,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) TableScan Union @@ -96,12 +96,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -109,14 
+109,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/union15.q.out ql/src/test/results/clientpositive/union15.q.out index 6337153..9cac42b 100644 --- ql/src/test/results/clientpositive/union15.q.out +++ ql/src/test/results/clientpositive/union15.q.out @@ -73,12 +73,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) TableScan alias: s2 @@ -98,12 +98,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) TableScan alias: s3 @@ -123,12 +123,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 51 Data size: 4896 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 25 Data size: 2400 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -136,14 +136,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 12 Data size: 1200 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column 
stats: PARTIAL + Statistics: Num rows: 12 Data size: 1200 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 25 Data size: 2500 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 12 Data size: 1200 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/union17.q.out ql/src/test/results/clientpositive/union17.q.out index fc647ff..699fc7c 100644 --- ql/src/test/results/clientpositive/union17.q.out +++ ql/src/test/results/clientpositive/union17.q.out @@ -114,7 +114,7 @@ STAGE PLANS: keys: VALUE._col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 501 Data size: 96192 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false table: @@ -126,7 +126,7 @@ STAGE PLANS: keys: VALUE._col0 (type: string), VALUE._col1 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 501 Data size: 188376 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false table: @@ -142,7 +142,7 @@ STAGE PLANS: key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 501 Data size: 96192 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -150,14 +150,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: final outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 48000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -185,7 +185,7 @@ STAGE PLANS: key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 501 Data size: 188376 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -193,14 +193,14 @@ STAGE PLANS: keys: KEY._col0 (type: string), KEY._col1 (type: string) mode: final outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 47000 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 
(type: bigint) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 47000 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 94000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 47000 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/union19.q.out ql/src/test/results/clientpositive/union19.q.out index 341c4cd..3e8fea3 100644 --- ql/src/test/results/clientpositive/union19.q.out +++ ql/src/test/results/clientpositive/union19.q.out @@ -90,12 +90,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) Select Operator expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string) @@ -127,12 +127,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 501 Data size: 48096 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 250 Data size: 24000 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) Select Operator expressions: _col0 (type: string), _col1 (type: string), _col1 (type: string) @@ -152,14 +152,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 12500 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 12500 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 250 Data size: 25000 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 125 Data size: 12500 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/union21.q.out ql/src/test/results/clientpositive/union21.q.out index eaaffb0..fb2328d 100644 --- ql/src/test/results/clientpositive/union21.q.out +++ ql/src/test/results/clientpositive/union21.q.out @@ -60,12 +60,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - 
Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) TableScan alias: src_thrift @@ -85,12 +85,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) TableScan alias: src @@ -110,12 +110,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) TableScan alias: src @@ -135,12 +135,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) TableScan alias: src @@ -160,12 +160,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 1522 Data size: 141546 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 761 Data size: 70773 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -173,14 +173,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 761 Data size: 76100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 380 Data size: 38000 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) 
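Throughout these plans the Data size figure tracks Num rows linearly: it is the row estimate times an average row width derived from column statistics, so halving the row estimate halves the reported size. The tiny sketch below only reproduces that arithmetic with per-row widths read off the surrounding plans (93 bytes per row for the union21 aggregate, 184 bytes per row for the single ds string key earlier); it does not derive the widths from column statistics.

final class DataSizeSketch {

  // Data size as reported in the Statistics lines: rows times average row width.
  static long dataSize(long numRows, long avgRowWidth) {
    return numRows * avgRowWidth;
  }

  public static void main(String[] args) {
    System.out.println(dataSize(761, 93));   // 70773, the halved union21 estimate above
    System.out.println(dataSize(1000, 184)); // 184000, the halved ds-key estimate earlier
  }
}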
outputColumnNames: _col0, _col1 - Statistics: Num rows: 761 Data size: 76100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 380 Data size: 38000 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 761 Data size: 76100 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 380 Data size: 38000 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/union5.q.out ql/src/test/results/clientpositive/union5.q.out index 70c18b9..5d2a447 100644 --- ql/src/test/results/clientpositive/union5.q.out +++ ql/src/test/results/clientpositive/union5.q.out @@ -70,12 +70,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) TableScan Union @@ -89,12 +89,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 2 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/union7.q.out ql/src/test/results/clientpositive/union7.q.out index 26561ae..46ed3db 100644 --- ql/src/test/results/clientpositive/union7.q.out +++ ql/src/test/results/clientpositive/union7.q.out @@ -69,12 +69,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 13 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 13 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL value expressions: _col1 (type: bigint) TableScan alias: s2 @@ -94,12 +94,12 @@ STAGE PLANS: keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 13 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 2496 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 13 Data size: 1248 Basic stats: COMPLETE Column stats: PARTIAL value 
expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -107,14 +107,14 @@ STAGE PLANS: keys: KEY._col0 (type: string) mode: mergepartial outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 13 Data size: 1300 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 600 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git ql/src/test/results/clientpositive/union_remove_1.q.out ql/src/test/results/clientpositive/union_remove_1.q.out index 6e03d4e..62bc729 100644 --- ql/src/test/results/clientpositive/union_remove_1.q.out +++ ql/src/test/results/clientpositive/union_remove_1.q.out @@ -81,12 +81,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -133,12 +133,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/union_remove_10.q.out ql/src/test/results/clientpositive/union_remove_10.q.out index 017944f..b078793 100644 --- ql/src/test/results/clientpositive/union_remove_10.q.out +++ ql/src/test/results/clientpositive/union_remove_10.q.out @@ -103,12 +103,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/union_remove_13.q.out ql/src/test/results/clientpositive/union_remove_13.q.out index f2a7324..3e129e7 100644 
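In these union_remove_* plans, operators that previously carried "Num rows: 0 Data size: 30 Basic stats: PARTIAL" are now annotated "Num rows: 0 Data size: 0 Basic stats: NONE": with no usable row count, the annotation no longer carries a raw file size and the basic-stats state degrades to NONE. The sketch below is only a hypothetical illustration of that three-way state; the enum and method are stand-ins, not Hive's Statistics API.

enum BasicStatsState { NONE, PARTIAL, COMPLETE }

final class BasicStatsStateSketch {

  // Hypothetical mapping from what is known to the state printed in the plans.
  static BasicStatsState stateFor(long numRows, long dataSize) {
    if (numRows <= 0 && dataSize <= 0) {
      return BasicStatsState.NONE;      // nothing usable: the new annotation
    }
    if (numRows <= 0 || dataSize <= 0) {
      return BasicStatsState.PARTIAL;   // only one of the two figures is known
    }
    return BasicStatsState.COMPLETE;    // both row count and data size are known
  }

  public static void main(String[] args) {
    System.out.println(stateFor(0, 0));  // NONE, as in the updated plans
    System.out.println(stateFor(0, 30)); // PARTIAL, as in the old plans
  }
}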
--- ql/src/test/results/clientpositive/union_remove_13.q.out +++ ql/src/test/results/clientpositive/union_remove_13.q.out @@ -95,12 +95,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/union_remove_15.q.out ql/src/test/results/clientpositive/union_remove_15.q.out index 902400d..f37b098 100644 --- ql/src/test/results/clientpositive/union_remove_15.q.out +++ ql/src/test/results/clientpositive/union_remove_15.q.out @@ -87,12 +87,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -141,12 +141,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/union_remove_16.q.out ql/src/test/results/clientpositive/union_remove_16.q.out index e92931c..3e84c7e 100644 --- ql/src/test/results/clientpositive/union_remove_16.q.out +++ ql/src/test/results/clientpositive/union_remove_16.q.out @@ -90,12 +90,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -173,12 +173,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: 
Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/union_remove_18.q.out ql/src/test/results/clientpositive/union_remove_18.q.out index eed4394..d1cff4a 100644 --- ql/src/test/results/clientpositive/union_remove_18.q.out +++ ql/src/test/results/clientpositive/union_remove_18.q.out @@ -85,12 +85,12 @@ STAGE PLANS: keys: key (type: string), ds (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator @@ -139,12 +139,12 @@ STAGE PLANS: keys: key (type: string), ds (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string), _col1 (type: string) sort order: ++ Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col2 (type: bigint) Reduce Operator Tree: Group By Operator diff --git ql/src/test/results/clientpositive/union_remove_19.q.out ql/src/test/results/clientpositive/union_remove_19.q.out index 51c154f..fb4c192 100644 --- ql/src/test/results/clientpositive/union_remove_19.q.out +++ ql/src/test/results/clientpositive/union_remove_19.q.out @@ -85,12 +85,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -137,12 +137,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator key expressions: _col0 (type: string) sort order: + Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE value expressions: _col1 (type: bigint) Reduce Operator Tree: Group By Operator @@ -445,12 +445,12 @@ STAGE PLANS: keys: key (type: string) mode: hash outputColumnNames: _col0, _col1 - Statistics: Num rows: 0 Data size: 30 Basic stats: 
PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -504,12 +504,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_2.q.out ql/src/test/results/clientpositive/union_remove_2.q.out
index 1efdce6..e407139 100644
--- ql/src/test/results/clientpositive/union_remove_2.q.out
+++ ql/src/test/results/clientpositive/union_remove_2.q.out
@@ -117,12 +117,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_20.q.out ql/src/test/results/clientpositive/union_remove_20.q.out
index bc90c08..b4ba781 100644
--- ql/src/test/results/clientpositive/union_remove_20.q.out
+++ ql/src/test/results/clientpositive/union_remove_20.q.out
@@ -83,12 +83,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -135,12 +135,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_21.q.out ql/src/test/results/clientpositive/union_remove_21.q.out
index 5734786..3cb3312 100644
--- ql/src/test/results/clientpositive/union_remove_21.q.out
+++ ql/src/test/results/clientpositive/union_remove_21.q.out
@@ -83,12 +83,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -135,12 +135,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_22.q.out ql/src/test/results/clientpositive/union_remove_22.q.out
index b835523..439e5a2 100644
--- ql/src/test/results/clientpositive/union_remove_22.q.out
+++ ql/src/test/results/clientpositive/union_remove_22.q.out
@@ -81,12 +81,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -133,12 +133,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -277,12 +277,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -329,12 +329,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_23.q.out ql/src/test/results/clientpositive/union_remove_23.q.out
index dd82b4f..d4f1468 100644
--- ql/src/test/results/clientpositive/union_remove_23.q.out
+++ ql/src/test/results/clientpositive/union_remove_23.q.out
@@ -86,12 +86,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_24.q.out ql/src/test/results/clientpositive/union_remove_24.q.out
index 38a68cf..5d1f4fa 100644
--- ql/src/test/results/clientpositive/union_remove_24.q.out
+++ ql/src/test/results/clientpositive/union_remove_24.q.out
@@ -79,12 +79,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -131,12 +131,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_25.q.out ql/src/test/results/clientpositive/union_remove_25.q.out
index 661be8b..3b20e15 100644
--- ql/src/test/results/clientpositive/union_remove_25.q.out
+++ ql/src/test/results/clientpositive/union_remove_25.q.out
@@ -97,12 +97,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -151,12 +151,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_4.q.out ql/src/test/results/clientpositive/union_remove_4.q.out
index f27f6b7..f139595 100644
--- ql/src/test/results/clientpositive/union_remove_4.q.out
+++ ql/src/test/results/clientpositive/union_remove_4.q.out
@@ -86,12 +86,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -177,12 +177,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_5.q.out ql/src/test/results/clientpositive/union_remove_5.q.out
index a6d7b3d..6841f43 100644
--- ql/src/test/results/clientpositive/union_remove_5.q.out
+++ ql/src/test/results/clientpositive/union_remove_5.q.out
@@ -163,12 +163,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_6.q.out ql/src/test/results/clientpositive/union_remove_6.q.out
index a9d2e8b..1b501af 100644
--- ql/src/test/results/clientpositive/union_remove_6.q.out
+++ ql/src/test/results/clientpositive/union_remove_6.q.out
@@ -85,12 +85,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -203,12 +203,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_7.q.out ql/src/test/results/clientpositive/union_remove_7.q.out
index 6a28b95..679b4d7 100644
--- ql/src/test/results/clientpositive/union_remove_7.q.out
+++ ql/src/test/results/clientpositive/union_remove_7.q.out
@@ -85,12 +85,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
@@ -137,12 +137,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_8.q.out ql/src/test/results/clientpositive/union_remove_8.q.out
index a688cff..529105b 100644
--- ql/src/test/results/clientpositive/union_remove_8.q.out
+++ ql/src/test/results/clientpositive/union_remove_8.q.out
@@ -121,12 +121,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator
diff --git ql/src/test/results/clientpositive/union_remove_9.q.out ql/src/test/results/clientpositive/union_remove_9.q.out
index 3ab6ed5..9b4168e 100644
--- ql/src/test/results/clientpositive/union_remove_9.q.out
+++ ql/src/test/results/clientpositive/union_remove_9.q.out
@@ -183,12 +183,12 @@ STAGE PLANS:
 keys: key (type: string)
 mode: hash
 outputColumnNames: _col0, _col1
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 Reduce Output Operator
 key expressions: _col0 (type: string)
 sort order: +
 Map-reduce partition columns: _col0 (type: string)
- Statistics: Num rows: 0 Data size: 30 Basic stats: PARTIAL Column stats: NONE
+ Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
 value expressions: _col1 (type: bigint)
 Reduce Operator Tree:
 Group By Operator