Index: ql/src/test/results/clientpositive/auto_join13.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join13.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/auto_join13.q.out	(working copy)
@@ -29,14 +29,14 @@
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:src
+        $hdt$_0:$hdt$_0:src
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:src
+        $hdt$_0:$hdt$_1:$hdt$_1:src
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:src
+        $hdt$_0:$hdt$_0:src
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -51,7 +51,7 @@
               keys:
                 0 UDFToDouble(_col0) (type: double)
                 1 (UDFToDouble(_col2) + UDFToDouble(_col0)) (type: double)
-        $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:src
+        $hdt$_0:$hdt$_1:$hdt$_1:src
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/groupby_ppr.q.out
===================================================================
--- ql/src/test/results/clientpositive/groupby_ppr.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/groupby_ppr.q.out	(working copy)
@@ -215,8 +215,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:src]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:src]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:src]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
Index: ql/src/test/results/clientpositive/join29.q.out
===================================================================
--- ql/src/test/results/clientpositive/join29.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/join29.q.out	(working copy)
@@ -84,11 +84,11 @@
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$INTNAME1
+        $INTNAME1
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$INTNAME1
+        $INTNAME1
           TableScan
             HashTable Sink Operator
               keys:
@@ -135,11 +135,11 @@
   Stage: Stage-9
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$INTNAME
+        $INTNAME
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$INTNAME
+        $INTNAME
           TableScan
             HashTable Sink Operator
               keys:
Index: ql/src/test/results/clientpositive/annotate_stats_select.q.out
===================================================================
--- ql/src/test/results/clientpositive/annotate_stats_select.q.out	(revision 1669675)
+++ ql/src/test/results/clientpositive/annotate_stats_select.q.out	(working copy)
@@ -556,10 +556,10 @@
           Select Operator
             expressions: array(1,2,3) (type: array<int>)
             outputColumnNames: _col0
-            Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -591,10 +591,10 @@
           Select Operator
             expressions: str_to_map('a=1 b=2 c=3',' ','=') (type: map<string,string>)
             outputColumnNames: _col0
-            Statistics: Num rows: 2 Data size: 1840 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: COMPLETE
             File Output Operator
               compressed: false
-              Statistics: Num rows: 2 Data size: 1840 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: COMPLETE
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Index: ql/src/test/results/clientpositive/auto_join22.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join22.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/auto_join22.q.out	(working copy)
@@ -13,14 +13,14 @@
   Stage: Stage-8
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:src4
+        $hdt$_0:$hdt$_0:$hdt$_0:src4
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:src4
+        $hdt$_0:$hdt$_1:src4
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:src4
+        $hdt$_0:$hdt$_0:$hdt$_0:src4
           TableScan
             alias: src4
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -35,7 +35,7 @@
               keys:
                 0 _col0 (type: string)
                 1 _col0 (type: string)
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:src4
+        $hdt$_0:$hdt$_1:src4
           TableScan
             alias: src4
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/pcr.q.out
===================================================================
--- ql/src/test/results/clientpositive/pcr.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/pcr.q.out	(working copy)
@@ -475,9 +475,9 @@
             name: default.pcr_t1
           name: default.pcr_t1
       Truncated Path -> Alias:
-        /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1]
-        /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1]
-        /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1]
+        /pcr_t1/ds=2000-04-08 [pcr_t1]
+        /pcr_t1/ds=2000-04-09 [pcr_t1]
+        /pcr_t1/ds=2000-04-10 [pcr_t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -1798,8 +1798,8 @@
             name: default.pcr_t1
           name: default.pcr_t1
       Truncated Path -> Alias:
-        /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1]
-        /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1]
+        /pcr_t1/ds=2000-04-08 [pcr_t1]
+        /pcr_t1/ds=2000-04-09 [pcr_t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -2003,8 +2003,8 @@
             name: default.pcr_t1
           name: default.pcr_t1
       Truncated Path -> Alias:
-        /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1]
-        /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1]
+        /pcr_t1/ds=2000-04-08 [pcr_t1]
+        /pcr_t1/ds=2000-04-09 [pcr_t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -2293,9 +2293,9 @@
             name: default.pcr_t1
           name: default.pcr_t1
       Truncated Path -> Alias:
-        /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1]
-        /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1]
-        /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1]
+        /pcr_t1/ds=2000-04-08 [pcr_t1]
+        /pcr_t1/ds=2000-04-09 [pcr_t1]
+        /pcr_t1/ds=2000-04-10 [pcr_t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
@@ -5325,7 +5325,7 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
       Needs Tagging: false
       Reduce Operator Tree:
         Select Operator
Index: ql/src/test/results/clientpositive/join33.q.out
===================================================================
--- ql/src/test/results/clientpositive/join33.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/join33.q.out	(working copy)
@@ -109,14 +109,14 @@
   Stage: Stage-7
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:y
+        $hdt$_0:y
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:y
+        $hdt$_0:y
           TableScan
             alias: y
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -134,7 +134,7 @@
                 0 _col0 (type: string)
                 1 _col3 (type: string)
               Position of Big Table: 1
-        $hdt$_0:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -356,7 +356,7 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:z]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]
 
   Stage: Stage-0
     Move Operator
Index: ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
===================================================================
--- ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out	(working copy)
@@ -64,14 +64,14 @@
   Stage: Stage-6
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:src
+        $hdt$_0:src
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_1:$hdt$_2:src1
+        $hdt$_1:$hdt$_2:src1
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:src
+        $hdt$_0:src
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -89,7 +89,7 @@
                 0 _col0 (type: string)
                 1 _col1 (type: string)
               Position of Big Table: 1
-        $hdt$_0:$hdt$_1:$hdt$_2:src1
+        $hdt$_1:$hdt$_2:src1
           TableScan
             alias: src1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -444,10 +444,10 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_1:$hdt$_1:srcpart]
-        /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:srcpart]
-        /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:$hdt$_1:$hdt$_1:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:$hdt$_1:srcpart]
+        /srcpart/ds=2008-04-09/hr=11 [$hdt$_1:$hdt$_1:srcpart]
+        /srcpart/ds=2008-04-09/hr=12 [$hdt$_1:$hdt$_1:srcpart]
 
   Stage: Stage-0
     Fetch Operator
@@ -470,14 +470,14 @@
   Stage: Stage-6
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:src
+        $hdt$_0:src
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_1:$hdt$_2:src1
+        $hdt$_1:$hdt$_2:src1
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:src
+        $hdt$_0:src
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -492,7 +492,7 @@
               keys:
                 0 _col0 (type: string)
                 1 _col1 (type: string)
-        $hdt$_0:$hdt$_1:$hdt$_2:src1
+        $hdt$_1:$hdt$_2:src1
           TableScan
             alias: src1
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out	(working copy)
@@ -211,8 +211,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-0
     Move Operator
Index: ql/src/test/results/clientpositive/alter_partition_coltype.q.out
===================================================================
--- ql/src/test/results/clientpositive/alter_partition_coltype.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/alter_partition_coltype.q.out	(working copy)
@@ -568,7 +568,7 @@
             name: default.alter_coltype
           name: default.alter_coltype
       Truncated Path -> Alias:
-        /alter_coltype/dt=100/ts=3.0 [$hdt$_0:$hdt$_0:alter_coltype]
+        /alter_coltype/dt=100/ts=3.0 [$hdt$_0:alter_coltype]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
Index: ql/src/test/results/clientpositive/correlationoptimizer3.q.out
===================================================================
--- ql/src/test/results/clientpositive/correlationoptimizer3.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/correlationoptimizer3.q.out	(working copy)
@@ -504,14 +504,14 @@
   Stage: Stage-9
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
+        $hdt$_0:$hdt$_0:$hdt$_1:x
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
+        $hdt$_0:$hdt$_0:$hdt$_1:x
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -526,7 +526,7 @@
               keys:
                 0 _col0 (type: string)
                 1 _col0 (type: string)
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1208,14 +1208,14 @@
   Stage: Stage-9
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
+        $hdt$_0:$hdt$_0:$hdt$_1:x
           Fetch Operator
             limit: -1
-        $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
+        $hdt$_0:$hdt$_0:$hdt$_1:x
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1230,7 +1230,7 @@
               keys:
                 0 _col0 (type: string)
                 1 _col0 (type: string)
-        $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out	(working copy)
@@ -316,7 +316,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
+        /fact_tz/ds=1/x=484 [fact_daily]
 
   Stage: Stage-0
     Fetch Operator
@@ -483,7 +483,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
+        /fact_tz/ds=1/x=484 [fact_daily]
 
   Stage: Stage-0
     Fetch Operator
@@ -630,7 +630,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_tz/ds=1/x=484 [$hdt$_0:$hdt$_0:fact_daily]
+        /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -822,7 +822,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_tz/ds=1/x=484 [$hdt$_0:$hdt$_0:fact_daily]
+        /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
Index: ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out	(working copy)
@@ -290,8 +290,8 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
-        /fact_daily/ds=1/hr=4/key=484/value=val_484 [$hdt$_0:fact_daily]
+        /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
+        /fact_daily/ds=1/hr=4/key=484/value=val_484 [fact_daily]
 
   Stage: Stage-0
     Fetch Operator
@@ -443,7 +443,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
+        /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
 
   Stage: Stage-0
     Fetch Operator
@@ -662,8 +662,8 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_daily/ds=1/hr=4/key=238/value=val_238 [$hdt$_0:fact_daily]
-        /fact_daily/ds=1/hr=4/key=484/value=val_484 [$hdt$_0:fact_daily]
+        /fact_daily/ds=1/hr=4/key=238/value=val_238 [fact_daily]
+        /fact_daily/ds=1/hr=4/key=484/value=val_484 [fact_daily]
 
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out
===================================================================
--- ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out	(working copy)
@@ -572,7 +572,7 @@
     Map Reduce
       Map Operator Tree:
           TableScan
-            alias: $hdt$_0:$hdt$_0:default.default__lineitem_ix_lineitem_ix_lshipdate_idx__
+            alias: $hdt$_0:default.default__lineitem_ix_lineitem_ix_lshipdate_idx__
             Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
             Select Operator
               expressions: year(l_shipdate) (type: int), month(l_shipdate) (type: int), l_shipdate (type: string), _count_of_l_shipdate (type: bigint)
@@ -2260,7 +2260,7 @@
     Map Reduce
       Map Operator Tree:
          TableScan
-            alias: $hdt$_0:$hdt$_0:default.default__tblpart_tbl_part_index__
+            alias: $hdt$_0:default.default__tblpart_tbl_part_index__
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Filter Operator
               predicate: (key < 10) (type: boolean)
Index: ql/src/test/results/clientpositive/groupby_map_ppr.q.out
===================================================================
--- ql/src/test/results/clientpositive/groupby_map_ppr.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/groupby_map_ppr.q.out	(working copy)
@@ -222,8 +222,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:src]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:src]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:src]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
Index: ql/src/test/results/clientpositive/groupby_sort_6.q.out
===================================================================
--- ql/src/test/results/clientpositive/groupby_sort_6.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/groupby_sort_6.q.out	(working copy)
@@ -425,7 +425,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1/ds=2 [$hdt$_0:$hdt$_0:t1]
+        /t1/ds=2 [$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
Index: ql/src/test/results/clientpositive/udf_reflect2.q.out
===================================================================
--- ql/src/test/results/clientpositive/udf_reflect2.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/udf_reflect2.q.out	(working copy)
@@ -320,17 +320,13 @@
           Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
           GatherStats: false
           Select Operator
-            expressions: UDFToInteger(key) (type: int), value (type: string)
-            outputColumnNames: _col0, _col1
+            expressions: UDFToInteger(key) (type: int), reflect2(UDFToInteger(key),'byteValue') (type: tinyint), reflect2(UDFToInteger(key),'shortValue') (type: smallint), reflect2(UDFToInteger(key),'intValue') (type: int), reflect2(UDFToInteger(key),'longValue') (type: bigint), reflect2(UDFToInteger(key),'floatValue') (type: float), reflect2(UDFToInteger(key),'doubleValue') (type: double), reflect2(UDFToInteger(key),'toString') (type: string), value (type: string), reflect2(value,'concat','_concat') (type: string), reflect2(value,'contains','86') (type: boolean), reflect2(value,'startsWith','v') (type: boolean), reflect2(value,'endsWith','6') (type: boolean), reflect2(value,'equals','val_86') (type: boolean), reflect2(value,'equalsIgnoreCase','VAL_86') (type: boolean), reflect2(value,'getBytes') (type: binary), reflect2(value,'indexOf','1') (type: int), reflect2(value,'lastIndexOf','1') (type: int), reflect2(value,'replace','val','VALUE') (type: string), reflect2(value,'substring',1) (type: string), reflect2(value,'substring',1,5) (type: string), reflect2(value,'toUpperCase') (type: string), reflect2(value,'trim') (type: string), 2013-02-15 19:41:20.0 (type: timestamp), 113 (type: int), 1 (type: int), 5 (type: int), 19 (type: int), 41 (type: int), 20 (type: int), 1360986080000 (type: bigint)
+            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: _col0 (type: int), reflect2(_col0,'byteValue') (type: tinyint), reflect2(_col0,'shortValue') (type: smallint), reflect2(_col0,'intValue') (type: int), reflect2(_col0,'longValue') (type: bigint), reflect2(_col0,'floatValue') (type: float), reflect2(_col0,'doubleValue') (type: double), reflect2(_col0,'toString') (type: string), _col1 (type: string), reflect2(_col1,'concat','_concat') (type: string), reflect2(_col1,'contains','86') (type: boolean), reflect2(_col1,'startsWith','v') (type: boolean), reflect2(_col1,'endsWith','6') (type: boolean), reflect2(_col1,'equals','val_86') (type: boolean), reflect2(_col1,'equalsIgnoreCase','VAL_86') (type: boolean), reflect2(_col1,'getBytes') (type: binary), reflect2(_col1,'indexOf','1') (type: int), reflect2(_col1,'lastIndexOf','1') (type: int), reflect2(_col1,'replace','val','VALUE') (type: string), reflect2(_col1,'substring',1) (type: string), reflect2(_col1,'substring',1,5) (type: string), reflect2(_col1,'toUpperCase') (type: string), reflect2(_col1,'trim') (type: string), 2013-02-15 19:41:20.0 (type: timestamp), 113 (type: int), 1 (type: int), 5 (type: int), 19 (type: int), 41 (type: int), 20 (type: int), 1360986080000 (type: bigint)
-              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30
-              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Limit
-                Number of rows: 5
-                Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
-                ListSink
+            Limit
+              Number of rows: 5
+              Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
+              ListSink
 
 PREHOOK: query: SELECT key,
        reflect2(key, "byteValue"),
Index: ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
===================================================================
--- ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out	(working copy)
@@ -239,8 +239,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:src]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:src]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:src]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
Index: ql/src/test/results/clientpositive/auto_join10.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join10.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/auto_join10.q.out	(working copy)
@@ -23,11 +23,11 @@
   Stage: Stage-5
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:src
+        $hdt$_0:$hdt$_0:src
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:src
+        $hdt$_0:$hdt$_0:src
           TableScan
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out	(working copy)
@@ -297,8 +297,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-0
     Move Operator
Index: ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out	(working copy)
@@ -166,7 +166,7 @@
             name: default.src
           name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_0:src]
+        /src [src]
 
   Stage: Stage-0
     Move Operator
Index: ql/src/test/results/clientpositive/join35.q.out
===================================================================
--- ql/src/test/results/clientpositive/join35.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/join35.q.out	(working copy)
@@ -232,7 +232,7 @@
             name: default.src
           name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_0-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:x]
+        /src [$hdt$_0-subquery1:$hdt$_0-subquery1:$hdt$_0:x]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -569,7 +569,7 @@
             name: default.src
           name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:x]
+        /src [$hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:x]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
Index: ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
===================================================================
--- ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out	(working copy)
@@ -197,8 +197,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-0
     Fetch Operator
@@ -521,10 +521,10 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+        /srcpart/ds=2008-04-09/hr=11 [srcpart]
+        /srcpart/ds=2008-04-09/hr=12 [srcpart]
 
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_1.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_1.q.out	(working copy)
@@ -207,8 +207,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-0
     Move Operator
@@ -523,7 +523,7 @@
             name: default.list_bucketing_dynamic_part
           name: default.list_bucketing_dynamic_part
       Truncated Path -> Alias:
-        /list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484 [$hdt$_0:list_bucketing_dynamic_part]
+        /list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484 [list_bucketing_dynamic_part]
 
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/subquery_in.q.out
===================================================================
--- ql/src/test/results/clientpositive/subquery_in.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/subquery_in.q.out	(working copy)
@@ -254,36 +254,32 @@
           TableScan
             alias: part
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_size (type: int)
-              outputColumnNames: _col0, _col1
+            Reduce Output Operator
+              key expressions: p_mfgr (type: string), p_size (type: int)
+              sort order: ++
+              Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: int)
-                sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col2, _col5
           Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
                   input alias: ptf_0
-                  output shape: _col0: string, _col1: int
+                  output shape: _col2: string, _col5: int
                   type: WINDOWING
                 Windowing table definition
                   input alias: ptf_1
                   name: windowingtablefunction
-                  order by: _col1
-                  partition by: _col0
+                  order by: _col5
+                  partition by: _col2
                   raw input shape:
                   window functions:
                       window function definition
                         alias: _wcol0
-                        arguments: _col1
+                        arguments: _col5
                         name: rank
                         window function: GenericUDAFRankEvaluator
                         window frame: PRECEDING(MAX)~FOLLOWING(MAX)
@@ -293,7 +289,7 @@
             predicate: (_wcol0 <= 2) (type: boolean)
             Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: _col1 (type: int)
+              expressions: _col5 (type: int)
               outputColumnNames: _col0
               Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
@@ -438,46 +434,42 @@
           TableScan
             alias: b
             Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_size (type: int)
-              outputColumnNames: _col0, _col1
+            Reduce Output Operator
+              key expressions: p_mfgr (type: string), p_size (type: int)
+              sort order: ++
+              Map-reduce partition columns: p_mfgr (type: string)
               Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: int)
-                sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
       Reduce Operator Tree:
         Select Operator
          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col2, _col5
           Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
           PTF Operator
             Function definitions:
                 Input definition
                   input alias: ptf_0
-                  output shape: _col0: string, _col1: int
+                  output shape: _col2: string, _col5: int
                   type: WINDOWING
                 Windowing table definition
                   input alias: ptf_1
                   name: windowingtablefunction
-                  order by: _col1
-                  partition by: _col0
+                  order by: _col5
+                  partition by: _col2
                   raw input shape:
                   window functions:
                       window function definition
                         alias: _wcol0
-                        arguments: _col1
+                        arguments: _col5
                         name: rank
                         window function: GenericUDAFRankEvaluator
                         window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                         isPivotResult: true
           Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
           Filter Operator
-            predicate: ((_wcol0 <= 2) and _col0 is not null) (type: boolean)
+            predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean)
             Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: _col0 (type: string), _col1 (type: int)
+              expressions: _col2 (type: string), _col5 (type: int)
               outputColumnNames: _col0, _col1
               Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
               Group By Operator
Index: ql/src/test/results/clientpositive/auto_join1.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join1.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/auto_join1.q.out	(working copy)
@@ -24,11 +24,11 @@
   Stage: Stage-5
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:src1
+        $hdt$_0:src1
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:src1
+        $hdt$_0:src1
           TableScan
             alias: src1
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
===================================================================
--- ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out	(working copy)
@@ -189,7 +189,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -527,7 +527,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -849,7 +849,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -1257,7 +1257,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -1548,9 +1548,13 @@
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -1567,17 +1571,39 @@
               Group By Operator
                 aggregations: count(1)
                 keys: 1 (type: int), _col1 (type: string)
-                mode: hash
+                mode: final
                 outputColumnNames: _col0, _col1, _col2
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int), _col1 (type: string)
-                  sort order: ++
-                  Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col2 (type: bigint)
-                  auto parallelism: false
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
+                  outputColumnNames: _col0, _col1, _col2
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          bucket_count -1
+                          columns key1,key2,cnt
+                          columns.comments
+                          columns.types int:int:int
+#### A masked pattern was here ####
+                          name default.outputtbl3
+                          serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.outputtbl3
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
       Path -> Alias:
#### A masked pattern was here ####
       Path -> Partition:
#### A masked pattern was here ####
           Partition
@@ -1630,90 +1656,122 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: int), KEY._col1 (type: string)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+        /t1 [$hdt$_0:t1]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col2
-                  columns.types int,string,bigint
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key1,key2,cnt
+                columns.comments
+                columns.types int:int:int
+#### A masked pattern was here ####
+                name default.outputtbl3
+                serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl3
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
     Map Reduce
       Map Operator Tree:
           TableScan
             GatherStats: false
-            Reduce Output Operator
-              key expressions: _col0 (type: int), _col1 (type: string)
-              sort order: ++
-              Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              tag: -1
-              value expressions: _col2 (type: bigint)
-              auto parallelism: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    columns key1,key2,cnt
+                    columns.comments
+                    columns.types int:int:int
+#### A masked pattern was here ####
+                    name default.outputtbl3
+                    serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl3
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
       Path -> Alias:
#### A masked pattern was here ####
       Path -> Partition:
#### A masked pattern was here ####
           Partition
-            base file name: -mr-10001
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              columns _col0,_col1,_col2
-              columns.types int,string,bigint
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              bucket_count -1
+              columns key1,key2,cnt
+              columns.comments
+              columns.types int:int:int
+#### A masked pattern was here ####
+              name default.outputtbl3
+              serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                columns _col0,_col1,_col2
-                columns.types int,string,bigint
-                escape.delim \
-                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                bucket_count -1
+                columns key1,key2,cnt
+                columns.comments
+                columns.types int:int:int
+#### A masked pattern was here ####
+                name default.outputtbl3
+                serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl3
+            name: default.outputtbl3
       Truncated Path -> Alias:
#### A masked pattern was here ####
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: int), KEY._col1 (type: string)
-          mode: final
-          outputColumnNames: _col0, _col1, _col2
-          Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
-            outputColumnNames: _col0, _col1, _col2
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
             File Output Operator
               compressed: false
-              GlobalTableId: 1
+              GlobalTableId: 0
#### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -1731,15 +1789,29 @@
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.outputtbl3
               TotalFiles: 1
-              GatherStats: true
+              GatherStats: false
               MultiFileSpray: false
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
+      Path -> Alias:
#### A masked pattern was here ####
-          table:
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key1,key2,cnt
+              columns.comments
+              columns.types int:int:int
+#### A masked pattern was here ####
+              name default.outputtbl3
+              serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
@@ -1755,9 +1827,14 @@
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.outputtbl3
+            name: default.outputtbl3
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-3
-    Stats-Aggr Operator
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
#### A masked pattern was here ####
 
PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
@@ -1921,7 +1998,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -2207,7 +2284,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -2539,7 +2616,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -2951,7 +3028,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:t1, null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:t1]
+        /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:t1, null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -3377,7 +3454,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:t1]
+        /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
        Group By Operator
@@ -3637,7 +3714,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:t1]
+        /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:t1]
#### A masked pattern was here ####
 
   Stage: Stage-8
@@ -4761,7 +4838,7 @@
             name: default.t2
           name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:$hdt$_0:t2]
+        /t2 [$hdt$_0:t2]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -4973,9 +5050,13 @@
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -4992,17 +5073,44 @@
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
-                mode: hash
+                mode: final
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-                  sort order: +++
-                  Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col3 (type: bigint)
-                  auto parallelism: false
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          COLUMN_STATS_ACCURATE true
+                          bucket_count -1
+                          columns key1,key2,key3,cnt
+                          columns.comments
+                          columns.types int:int:string:int
+#### A masked pattern was here ####
+                          name default.outputtbl4
+                          numFiles 1
+                          numRows 6
+                          rawDataSize 48
+                          serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                          totalSize 54
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.outputtbl4
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
       Path -> Alias:
#### A masked pattern was here ####
       Path -> Partition:
#### A masked pattern was here ####
           Partition
@@ -5055,90 +5163,142 @@
             name: default.t2
           name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:$hdt$_0:t2]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+        /t2 [$hdt$_0:t2]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col2,_col3
-                  columns.types string,int,string,bigint
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key1,key2,key3,cnt
+                columns.comments
+                columns.types int:int:string:int
+#### A masked pattern was here ####
+                name default.outputtbl4
+                numFiles 1
+                numRows 6
+                rawDataSize 48
+                serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 54
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl4
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
     Map Reduce
      Map Operator Tree:
          TableScan
            GatherStats: false
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-              sort order: +++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              tag: -1
-              value expressions: _col3 (type: bigint)
-              auto parallelism: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE true
+                    bucket_count -1
+                    columns key1,key2,key3,cnt
+                    columns.comments
+                    columns.types int:int:string:int
+#### A masked pattern was here ####
+                    name default.outputtbl4
+                    numFiles 1
+                    numRows 6
+                    rawDataSize 48
+                    serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 54
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl4
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
       Path -> Alias:
#### A masked pattern was here ####
       Path -> Partition:
#### A masked pattern was here ####
           Partition
-            base file name: -mr-10001
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              columns _col0,_col1,_col2,_col3
-              columns.types string,int,string,bigint
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key1,key2,key3,cnt
+              columns.comments
+              columns.types int:int:string:int
+#### A masked pattern was here ####
+              name default.outputtbl4
+              numFiles 1
+              numRows 6
+              rawDataSize 48
+              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 54
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                columns _col0,_col1,_col2,_col3
-                columns.types string,int,string,bigint
-                escape.delim \
-                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key1,key2,key3,cnt
+                columns.comments
+                columns.types int:int:string:int
+#### A masked pattern was here ####
+                name default.outputtbl4
+                numFiles 1
+                numRows 6
+                rawDataSize 48
+                serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 54
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl4
+            name: default.outputtbl4
       Truncated Path -> Alias:
#### A masked pattern was here ####
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
-          mode: final
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
             File Output Operator
               compressed: false
-              GlobalTableId: 1
+              GlobalTableId: 0
#### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -5161,15 +5321,34 @@
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.outputtbl4
               TotalFiles: 1
-              GatherStats: true
+              GatherStats: false
               MultiFileSpray: false
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
+      Path -> Alias:
#### A masked pattern was here ####
-          table:
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key1,key2,key3,cnt
+              columns.comments
+              columns.types int:int:string:int
+#### A masked pattern was here ####
+              name default.outputtbl4
+              numFiles 1
+              numRows 6
+              rawDataSize 48
+              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 54
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
@@ -5190,9 +5369,14 @@
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.outputtbl4
+            name: default.outputtbl4
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-3
-    Stats-Aggr Operator
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
#### A masked pattern was here ####
 
PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -5281,9 +5465,13 @@
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -5300,17 +5488,39 @@
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string), 1 (type: int), _col2 (type: string), 2 (type: int)
-                mode: hash
+                mode: final
                 outputColumnNames: _col0, _col1, _col2, _col3, _col4
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
-                  sort order: ++++
-                  Map-reduce partition columns: rand() (type: double)
-                  Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                  tag: -1
-                  value expressions: _col4 (type: bigint)
-                  auto parallelism: false
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                Select Operator
+                  expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
+                  outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                  File Output Operator
+                    compressed: false
+                    GlobalTableId: 1
+#### A masked pattern was here ####
+                    NumFilesPerFileSink: 1
+                    Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                    table:
+                        input format: org.apache.hadoop.mapred.TextInputFormat
+                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                        properties:
+                          bucket_count -1
+                          columns key1,key2,key3,key4,cnt
+                          columns.comments
+                          columns.types int:int:string:int:int
+#### A masked pattern was here ####
+                          name default.outputtbl5
+                          serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+                          serialization.format 1
+                          serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        name: default.outputtbl5
+                    TotalFiles: 1
+                    GatherStats: true
+                    MultiFileSpray: false
       Path -> Alias:
#### A masked pattern was here ####
       Path -> Partition:
#### A masked pattern was here ####
           Partition
@@ -5363,90 +5573,122 @@
             name: default.t2
           name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:$hdt$_0:t2]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string), KEY._col3 (type: int)
-          mode: partials
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-          File Output Operator
-            compressed: false
-            GlobalTableId: 0
+        /t2 [$hdt$_0:t2]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
#### A masked pattern was here ####
-            NumFilesPerFileSink: 1
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                properties:
-                  columns _col0,_col1,_col2,_col3,_col4
-                  columns.types string,int,string,int,bigint
-                  escape.delim \
-                  serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            TotalFiles: 1
-            GatherStats: false
-            MultiFileSpray: false
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key1,key2,key3,key4,cnt
+                columns.comments
+                columns.types int:int:string:int:int
+#### A masked pattern was here ####
+                name default.outputtbl5
+                serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl5
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
     Map Reduce
      Map Operator Tree:
          TableScan
            GatherStats: false
-            Reduce Output Operator
-              key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
-              sort order: ++++
-              Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              tag: -1
-              value expressions: _col4 (type: bigint)
-              auto parallelism: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
+#### A masked pattern was here ####
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    columns key1,key2,key3,key4,cnt
+                    columns.comments
+                    columns.types int:int:string:int:int
+#### A masked pattern was here ####
+                    name default.outputtbl5
+                    serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl5
+              TotalFiles: 1
+              GatherStats: false
+              MultiFileSpray: false
       Path -> Alias:
#### A masked pattern was here ####
       Path -> Partition:
#### A masked pattern was here ####
           Partition
-            base file name: -mr-10001
-            input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-            output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
-              columns _col0,_col1,_col2,_col3,_col4
-              columns.types string,int,string,int,bigint
-              escape.delim \
-              serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-            serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+              bucket_count -1
+              columns key1,key2,key3,key4,cnt
+              columns.comments
+              columns.types int:int:string:int:int
+#### A masked pattern was here ####
+              name default.outputtbl5
+              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-              input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-              output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
               properties:
-                columns _col0,_col1,_col2,_col3,_col4
-                columns.types string,int,string,int,bigint
-                escape.delim \
-                serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-              serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+                bucket_count -1
+                columns key1,key2,key3,key4,cnt
+                columns.comments
+                columns.types int:int:string:int:int
+#### A masked pattern was here ####
+                name default.outputtbl5
+                serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl5
+            name: default.outputtbl5
       Truncated Path -> Alias:
#### A masked pattern was here ####
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string), KEY._col3 (type: int)
-          mode: final
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
             File Output Operator
               compressed: false
-              GlobalTableId: 1
+              GlobalTableId: 0
#### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -5464,15 +5706,29 @@
                   serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                   name: default.outputtbl5
               TotalFiles: 1
-              GatherStats: true
+              GatherStats: false
               MultiFileSpray: false
-
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
+      Path -> Alias:
#### A masked pattern was here ####
-          table:
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key1,key2,key3,key4,cnt
+              columns.comments
+              columns.types int:int:string:int:int
+#### A masked pattern was here ####
+              name default.outputtbl5
+              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
              input format: org.apache.hadoop.mapred.TextInputFormat
              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
              properties:
@@ -5488,9 +5744,14 @@
#### A masked pattern was here ####
              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
              name: default.outputtbl5
+            name: default.outputtbl5
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-3
-    Stats-Aggr Operator
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
#### A masked pattern was here ####
 
PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5
@@ -5593,9 +5854,13 @@
 
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-  Stage-3 depends on stages: Stage-0
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
+  Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -5612,17 +5877,44 @@
               Group By Operator
                 aggregations: count(1)
                 keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
-                mode: hash
+                mode: final
                 outputColumnNames: _col0, _col1, _col2, _col3
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                Reduce Output Operator
-                  key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-                  sort order: +++
-                  Map-reduce partition columns: rand() (type:
double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col3 (type: bigint) - auto parallelism: false + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl4 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: @@ -5675,90 +5967,142 @@ name: default.t2 name: default.t2 Truncated Path -> Alias: - /t2 [$hdt$_0:$hdt$_0:$hdt$_0:t2] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) - mode: partials - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + /t2 [$hdt$_0:t2] + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true #### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + Stage: Stage-0 + Move Operator + tables: + replace: true +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: 
default.outputtbl4 + Stage: Stage-2 + Stats-Aggr Operator +#### A masked pattern was here #### + + Stage: Stage-3 Map Reduce Map Operator Tree: TableScan GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) - sort order: +++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col3 (type: bigint) - auto parallelism: false + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl4 + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: #### A masked pattern was here #### Partition - base file name: -mr-10001 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + base file name: -ext-10001 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl4 + name: default.outputtbl4 Truncated Path -> Alias: #### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + GatherStats: false File Output Operator compressed: false - GlobalTableId: 1 + GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -5781,15 +6125,34 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.outputtbl4 TotalFiles: 1 - GatherStats: true + GatherStats: false MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true + Path -> Alias: #### A masked pattern was here #### - table: + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: -ext-10001 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -5810,9 +6173,14 @@ #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.outputtbl4 + name: default.outputtbl4 + Truncated Path -> Alias: +#### A masked pattern was here #### - Stage: Stage-3 - Stats-Aggr Operator + Stage: Stage-6 + Move Operator + files: + hdfs directory: true #### A masked pattern was here #### PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 @@ -5944,9 +6312,13 @@ STAGE DEPENDENCIES: Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - Stage-3 depends on stages: Stage-0 + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 STAGE PLANS: Stage: Stage-1 @@ -5962,19 +6334,45 @@ Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE Group By 
Operator aggregations: count(1) - bucketGroup: true keys: _col0 (type: string), 2 (type: int), _col2 (type: string) - mode: hash + mode: final outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) - sort order: +++ - Map-reduce partition columns: rand() (type: double) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col3 (type: bigint) - auto parallelism: false + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl4 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: @@ -6027,90 +6425,142 @@ name: default.t2 name: default.t2 Truncated Path -> Alias: - /t2 [$hdt$_0:$hdt$_0:$hdt$_0:t2] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) - mode: partials - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - GlobalTableId: 0 + /t2 [$hdt$_0:t2] + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true #### A masked pattern was here #### - NumFilesPerFileSink: 1 - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false + Stage: Stage-0 + Move Operator + tables: + replace: true +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + 
columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl4 + Stage: Stage-2 + Stats-Aggr Operator +#### A masked pattern was here #### + + Stage: Stage-3 Map Reduce Map Operator Tree: TableScan GatherStats: false - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string) - sort order: +++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col3 (type: bigint) - auto parallelism: false + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl4 + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: #### A masked pattern was here #### Partition - base file name: -mr-10001 - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + base file name: -ext-10001 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: - columns _col0,_col1,_col2,_col3 - columns.types string,int,string,bigint - escape.delim \ - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - 
serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl4 + name: default.outputtbl4 Truncated Path -> Alias: #### A masked pattern was here #### - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + GatherStats: false File Output Operator compressed: false - GlobalTableId: 1 + GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -6133,15 +6583,34 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.outputtbl4 TotalFiles: 1 - GatherStats: true + GatherStats: false MultiFileSpray: false - - Stage: Stage-0 - Move Operator - tables: - replace: true + Path -> Alias: #### A masked pattern was here #### - table: + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: -ext-10001 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns key1,key2,key3,cnt + columns.comments + columns.types int:int:string:int +#### A masked pattern was here #### + name default.outputtbl4 + numFiles 1 + numRows 6 + rawDataSize 48 + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + totalSize 54 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat properties: @@ -6162,9 +6631,14 @@ #### A masked pattern was here #### serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.outputtbl4 + name: default.outputtbl4 + Truncated Path -> Alias: +#### A masked pattern was here #### - Stage: Stage-3 - Stats-Aggr Operator + Stage: Stage-6 + Move Operator + files: + hdfs directory: true #### A masked pattern was here #### PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4 Index: ql/src/test/results/clientpositive/subquery_in_having.q.out =================================================================== 
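[Editor's sketch — for orientation only, not part of the patch. The subquery_in_having.q.out hunk below drops the interposed Select Operator, so the Reduce Output Operator now keys directly on the p_mfgr and p_size table columns and the PTF Operator consumes them under their post-join names (_col1, _col2, _col5). A minimal HiveQL shape that yields such a plan is given here; the real query text lives in subquery_in_having.q, which is not included in this diff, so treat this as an assumed reconstruction rather than the test's actual query.]

    -- Assumed reconstruction of the windowing shape behind the PTF plan below:
    -- first_value(p_name) computed per p_mfgr partition, ordered by p_size,
    -- with the default frame implied by "window frame: PRECEDING(MAX)~"
    -- (i.e. UNBOUNDED PRECEDING through CURRENT ROW).
    SELECT p_mfgr,
           p_size,
           first_value(p_name) OVER (PARTITION BY p_mfgr ORDER BY p_size) AS first_name
    FROM part_subq;

[In the old plan this query shuffled projected columns (_col0.._col2) produced by a Select Operator; in the new plan the shuffle reads p_name/p_mfgr/p_size straight off the TableScan, which is why the partition/order keys in the hunk change from _col1/_col2 to _col2/_col5.]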
--- ql/src/test/results/clientpositive/subquery_in_having.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/subquery_in_having.q.out (working copy) @@ -1332,37 +1332,33 @@ TableScan alias: part_subq Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) - outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string), _col2 (type: int) - sort order: ++ - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) + value expressions: p_name (type: string) Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1, _col2 + expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col1, _col2, _col5 Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: string, _col2: int + output shape: _col1: string, _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2 - partition by: _col1 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col0 + arguments: _col1 name: first_value window function: GenericUDAFFirstValueEvaluator window frame: PRECEDING(MAX)~ Index: ql/src/test/results/clientpositive/mapjoin_subquery.q.out =================================================================== --- ql/src/test/results/clientpositive/mapjoin_subquery.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/mapjoin_subquery.q.out (working copy) @@ -27,14 +27,14 @@ Stage: Stage-6 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:z + $hdt$_0:z Fetch Operator limit: -1 - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x + $hdt$_1:$hdt$_2:x Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:z + $hdt$_0:z TableScan alias: z Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -49,7 +49,7 @@ keys: 0 _col0 (type: string) 1 _col0 (type: string) - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x + $hdt$_1:$hdt$_2:x TableScan alias: x Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE @@ -270,14 +270,14 @@ Stage: Stage-6 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:z + $hdt$_0:z Fetch Operator limit: -1 - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x + $hdt$_1:$hdt$_2:x Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:z + $hdt$_0:z TableScan alias: z Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -292,7 +292,7 @@ keys: 0 _col0 (type: string) 1 _col0 (type: string) - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x + $hdt$_1:$hdt$_2:x TableScan alias: x Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Index: 
ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out =================================================================== --- ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out (revision 1669497) +++ ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out (working copy) @@ -239,8 +239,8 @@ name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart] + /srcpart/ds=2008-04-08/hr=11 [srcpart] + /srcpart/ds=2008-04-08/hr=12 [srcpart] Stage: Stage-0 Move Operator @@ -534,8 +534,8 @@ name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart] + /srcpart/ds=2008-04-08/hr=11 [srcpart] + /srcpart/ds=2008-04-08/hr=12 [srcpart] Stage: Stage-7 Conditional Operator Index: ql/src/test/results/clientpositive/tez/subquery_in.q.out =================================================================== --- ql/src/test/results/clientpositive/tez/subquery_in.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/tez/subquery_in.q.out (working copy) @@ -293,15 +293,11 @@ TableScan alias: part Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_mfgr (type: string), p_size (type: int) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Merge Join Operator @@ -323,24 +319,24 @@ Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 + outputColumnNames: _col2, _col5 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: int + output shape: _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 - partition by: _col0 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col1 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) @@ -350,7 +346,7 @@ predicate: (_wcol0 <= 2) (type: boolean) Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: int) + expressions: _col5 (type: int) outputColumnNames: _col0 Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -465,15 +461,11 @@ TableScan alias: b Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_mfgr (type: string), p_size (type: int) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 
Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reducer 2 Reduce Operator Tree: Merge Join Operator @@ -499,34 +491,34 @@ Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 + outputColumnNames: _col2, _col5 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: int + output shape: _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 - partition by: _col0 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col1 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_wcol0 <= 2) and _col0 is not null) (type: boolean) + predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean) Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: int) + expressions: _col2 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE Group By Operator Index: ql/src/test/results/clientpositive/windowing_streaming.q.out =================================================================== --- ql/src/test/results/clientpositive/windowing_streaming.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/windowing_streaming.q.out (working copy) @@ -65,43 +65,39 @@ TableScan alias: part Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_name (type: string), p_mfgr (type: string) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_name (type: string) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string), _col0 (type: string) - sort order: ++ - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string) - outputColumnNames: _col0, _col1 + outputColumnNames: _col1, _col2 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: string + output shape: _col1: string, _col2: string type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col0 - partition by: _col1 + order by: _col1 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col0 + arguments: _col1 name: rank window function: GenericUDAFRankEvaluator 
window frame: PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _wcol0 (type: int) + expressions: _col2 (type: string), _wcol0 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -139,37 +135,33 @@ TableScan alias: part Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_name (type: string), p_mfgr (type: string) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_name (type: string) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string), _col0 (type: string) - sort order: ++ - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - TopN Hash Memory Usage: 0.8 + TopN Hash Memory Usage: 0.8 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string) - outputColumnNames: _col0, _col1 + outputColumnNames: _col1, _col2 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: string + output shape: _col1: string, _col2: string type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col0 - partition by: _col1 + order by: _col1 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col0 + arguments: _col1 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) @@ -179,7 +171,7 @@ predicate: (_wcol0 < 4) (type: boolean) Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _wcol0 (type: int) + expressions: _col2 (type: string), _wcol0 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -312,37 +304,33 @@ TableScan alias: alltypesorc Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: ctinyint (type: tinyint), cdouble (type: double) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: ctinyint (type: tinyint), cdouble (type: double) + sort order: ++ + Map-reduce partition columns: ctinyint (type: tinyint) Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: tinyint), _col1 (type: double) - sort order: ++ - Map-reduce partition columns: _col0 (type: tinyint) - Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE - TopN Hash Memory Usage: 0.8 + TopN Hash Memory Usage: 0.8 Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double) - outputColumnNames: _col0, _col1 + outputColumnNames: _col0, _col5 Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition 
input alias: ptf_0 - output shape: _col0: tinyint, _col1: double + output shape: _col0: tinyint, _col5: double type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 + order by: _col5 partition by: _col0 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col1 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) @@ -352,7 +340,7 @@ predicate: (_wcol0 < 5) (type: boolean) Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: tinyint), _col1 (type: double), _wcol0 (type: int) + expressions: _col0 (type: tinyint), _col5 (type: double), _wcol0 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE File Output Operator Index: ql/src/test/results/clientpositive/auto_join26.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join26.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/auto_join26.q.out (working copy) @@ -28,11 +28,11 @@ Stage: Stage-6 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:$hdt$_1:x + $hdt$_0:$hdt$_1:x Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:$hdt$_1:x + $hdt$_0:$hdt$_1:x TableScan alias: x Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Index: ql/src/test/results/clientpositive/auto_join12.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join12.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/auto_join12.q.out (working copy) @@ -29,14 +29,14 @@ Stage: Stage-7 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:src + $hdt$_0:$hdt$_0:$hdt$_0:src Fetch Operator limit: -1 - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:src + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:src Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:src + $hdt$_0:$hdt$_0:$hdt$_0:src TableScan alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -51,7 +51,7 @@ keys: 0 _col0 (type: string) 1 _col0 (type: string) - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:src + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:src TableScan alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Index: ql/src/test/results/clientpositive/join28.q.out =================================================================== --- ql/src/test/results/clientpositive/join28.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/join28.q.out (working copy) @@ -40,14 +40,14 @@ Stage: Stage-7 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:z + $hdt$_0:z Fetch Operator limit: -1 - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x + $hdt$_1:$hdt$_2:x Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:z + $hdt$_0:z TableScan alias: z Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -62,7 +62,7 @@ keys: 0 _col0 (type: string) 1 _col0 (type: string) - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x + $hdt$_1:$hdt$_2:x TableScan alias: x Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE Index: ql/src/test/results/clientpositive/combine2.q.out 
=================================================================== --- ql/src/test/results/clientpositive/combine2.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/combine2.q.out (working copy) @@ -564,14 +564,14 @@ name: default.combine2 name: default.combine2 Truncated Path -> Alias: - /combine2/value=2010-04-21 09%3A45%3A00 [$hdt$_0:$hdt$_0:combine2] - /combine2/value=val_0 [$hdt$_0:$hdt$_0:combine2] - /combine2/value=val_2 [$hdt$_0:$hdt$_0:combine2] - /combine2/value=val_4 [$hdt$_0:$hdt$_0:combine2] - /combine2/value=val_5 [$hdt$_0:$hdt$_0:combine2] - /combine2/value=val_8 [$hdt$_0:$hdt$_0:combine2] - /combine2/value=val_9 [$hdt$_0:$hdt$_0:combine2] - /combine2/value=| [$hdt$_0:$hdt$_0:combine2] + /combine2/value=2010-04-21 09%3A45%3A00 [$hdt$_0:combine2] + /combine2/value=val_0 [$hdt$_0:combine2] + /combine2/value=val_2 [$hdt$_0:combine2] + /combine2/value=val_4 [$hdt$_0:combine2] + /combine2/value=val_5 [$hdt$_0:combine2] + /combine2/value=val_8 [$hdt$_0:combine2] + /combine2/value=val_9 [$hdt$_0:combine2] + /combine2/value=| [$hdt$_0:combine2] Needs Tagging: false Reduce Operator Tree: Group By Operator Index: ql/src/test/results/clientpositive/join32.q.out =================================================================== --- ql/src/test/results/clientpositive/join32.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/join32.q.out (working copy) @@ -109,14 +109,14 @@ Stage: Stage-7 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:y + $hdt$_0:y Fetch Operator limit: -1 - $hdt$_0:$hdt$_1:$hdt$_2:x + $hdt$_1:$hdt$_2:x Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:y + $hdt$_0:y TableScan alias: y Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -134,7 +134,7 @@ 0 _col0 (type: string) 1 _col3 (type: string) Position of Big Table: 1 - $hdt$_0:$hdt$_1:$hdt$_2:x + $hdt$_1:$hdt$_2:x TableScan alias: x Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE @@ -356,7 +356,7 @@ name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:z] + /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z] Stage: Stage-0 Move Operator Index: ql/src/test/results/clientpositive/input_part1.q.out =================================================================== --- ql/src/test/results/clientpositive/input_part1.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/input_part1.q.out (working copy) @@ -172,7 +172,7 @@ name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart] + /srcpart/ds=2008-04-08/hr=12 [srcpart] Stage: Stage-7 Conditional Operator Index: ql/src/test/results/clientpositive/correlationoptimizer12.q.out =================================================================== --- ql/src/test/results/clientpositive/correlationoptimizer12.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/correlationoptimizer12.q.out (working copy) @@ -27,16 +27,12 @@ TableScan alias: x Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 
(type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) + value expressions: value (type: string) Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) @@ -116,16 +112,12 @@ TableScan alias: y Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: key (type: string) + sort order: + + Map-reduce partition columns: key (type: string) Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string) - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string) + value expressions: value (type: string) Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) Index: ql/src/test/results/clientpositive/multiMapJoin2.q.out =================================================================== --- ql/src/test/results/clientpositive/multiMapJoin2.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/multiMapJoin2.q.out (working copy) @@ -761,11 +761,11 @@ Stage: Stage-17 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 TableScan alias: y1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE @@ -833,11 +833,11 @@ Stage: Stage-15 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:$INTNAME1 + $hdt$_0:$INTNAME1 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:$INTNAME1 + $hdt$_0:$INTNAME1 TableScan HashTable Sink Operator keys: @@ -917,11 +917,11 @@ Stage: Stage-16 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:$INTNAME + $hdt$_0:$INTNAME Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:$INTNAME + $hdt$_0:$INTNAME TableScan HashTable Sink Operator keys: @@ -993,11 +993,11 @@ Stage: Stage-18 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 TableScan alias: y1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE @@ -1159,14 +1159,14 @@ Stage: Stage-9 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 Fetch Operator limit: -1 - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1 TableScan alias: y1 Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE @@ -1181,7 +1181,7 @@ keys: 0 _col0 (type: string) 1 _col0 (type: string) - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1 + 
$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
          TableScan
            alias: y1
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1488,11 +1488,11 @@
   Stage: Stage-12
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$INTNAME1
+        $hdt$_0:$INTNAME1
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$INTNAME1
+        $hdt$_0:$INTNAME1
           TableScan
            HashTable Sink Operator
              keys:
@@ -1575,11 +1575,11 @@
   Stage: Stage-13
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$INTNAME
+        $hdt$_0:$INTNAME
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$INTNAME
+        $hdt$_0:$INTNAME
           TableScan
            HashTable Sink Operator
              keys:
@@ -1658,11 +1658,11 @@
   Stage: Stage-14
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
+        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
+        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
           TableScan
            alias: x1
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1823,11 +1823,11 @@
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
+        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
+        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
           TableScan
            alias: x1
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -2218,11 +2218,11 @@
   Stage: Stage-15
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:a
+        null-subquery1:$hdt$_0-subquery1:$hdt$_1:$hdt$_1:$hdt$_1:a
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:a
+        null-subquery1:$hdt$_0-subquery1:$hdt$_1:$hdt$_1:$hdt$_1:a
           TableScan
            alias: a
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -2287,14 +2287,14 @@
   Stage: Stage-14
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:a
+        null-subquery1:$hdt$_0-subquery1:$hdt$_0:a
           Fetch Operator
             limit: -1
-        null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:a
+        null-subquery2:$hdt$_0-subquery2:$hdt$_0:a
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:a
+        null-subquery1:$hdt$_0-subquery1:$hdt$_0:a
           TableScan
            alias: a
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -2309,7 +2309,7 @@
            keys:
              0 _col0 (type: string)
              1 _col0 (type: string)
-        null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:a
+        null-subquery2:$hdt$_0-subquery2:$hdt$_0:a
           TableScan
            alias: a
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -2370,11 +2370,11 @@
   Stage: Stage-16
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:a
+        null-subquery2:$hdt$_0-subquery2:$hdt$_1:$hdt$_1:$hdt$_1:a
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:a
+        null-subquery2:$hdt$_0-subquery2:$hdt$_1:$hdt$_1:$hdt$_1:a
           TableScan
            alias: a
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_3.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_3.q.out	(working copy)
@@ -199,8 +199,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
   Stage: Stage-0
     Move Operator
@@ -463,7 +463,7 @@
             name: default.list_bucketing_static_part
           name: default.list_bucketing_static_part
       Truncated Path -> Alias:
-        /list_bucketing_static_part/ds=2008-04-08/hr=11/key=484 [$hdt$_0:list_bucketing_static_part]
+        /list_bucketing_static_part/ds=2008-04-08/hr=11/key=484 [list_bucketing_static_part]
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/unionDistinct_1.q.out
===================================================================
--- ql/src/test/results/clientpositive/unionDistinct_1.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/unionDistinct_1.q.out	(working copy)
@@ -9089,7 +9089,7 @@
             name: default.src5
           name: default.src5
       Truncated Path -> Alias:
-        /src5 [$hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:src5]
+        /src5 [$hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:src5]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
Index: ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out	(working copy)
@@ -299,7 +299,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
+        /fact_tz/ds=1/x=484 [fact_daily]
   Stage: Stage-0
     Fetch Operator
@@ -446,7 +446,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
+        /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
   Stage: Stage-0
     Fetch Operator
@@ -589,7 +589,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
+        /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out	(working copy)
@@ -395,7 +395,7 @@
             name: default.list_bucketing_static_part
           name: default.list_bucketing_static_part
       Truncated Path -> Alias:
-        /list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466 [$hdt$_0:list_bucketing_static_part]
+        /list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466 [list_bucketing_static_part]
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out	(working copy)
@@ -246,7 +246,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_daily/ds=1/hr=4/key=484/value=val_484 [$hdt$_0:fact_daily]
+        /fact_daily/ds=1/hr=4/key=484/value=val_484 [fact_daily]
   Stage: Stage-0
     Fetch Operator
@@ -406,7 +406,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_daily/ds=1/hr=4/key=238/value=val_238 [$hdt$_0:fact_daily]
+        /fact_daily/ds=1/hr=4/key=238/value=val_238 [fact_daily]
   Stage: Stage-0
     Fetch Operator
@@ -559,7 +559,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
+        /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
   Stage: Stage-0
     Fetch Operator
@@ -713,7 +713,7 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
+        /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/explain_logical.q.out
===================================================================
--- ql/src/test/results/clientpositive/explain_logical.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/explain_logical.q.out	(working copy)
@@ -100,7 +100,7 @@
 LOGICAL PLAN:
-$hdt$_0:$hdt$_0:srcpart
+$hdt$_0:srcpart
   TableScan (TS_0)
     alias: srcpart
    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
@@ -108,25 +108,25 @@
      expressions: key (type: string)
      outputColumnNames: _col0
      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-      Group By Operator (GBY_5)
+      Group By Operator (GBY_4)
        aggregations: count(1)
        keys: _col0 (type: string)
        mode: hash
        outputColumnNames: _col0, _col1
        Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_6)
+        Reduce Output Operator (RS_5)
          key expressions: _col0 (type: string)
          sort order: +
          Map-reduce partition columns: _col0 (type: string)
          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
          value expressions: _col1 (type: bigint)
-          Group By Operator (GBY_7)
+          Group By Operator (GBY_6)
            aggregations: count(VALUE._col0)
            keys: KEY._col0 (type: string)
            mode: mergepartial
            outputColumnNames: _col0, _col1
            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator (FS_9)
+            File Output Operator (FS_8)
              compressed: false
              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
              table:
@@ -180,7 +180,7 @@
 LOGICAL PLAN:
-$hdt$_0:$hdt$_0:src
+$hdt$_0:src
   TableScan (TS_0)
     alias: src
    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -188,25 +188,25 @@
      expressions: key (type: string)
      outputColumnNames: _col0
      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-      Group By Operator (GBY_4)
+      Group By Operator (GBY_3)
        aggregations: count(1)
        keys: _col0 (type: string)
        mode: hash
        outputColumnNames: _col0, _col1
        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_5)
+        Reduce Output Operator (RS_4)
          key expressions: _col0 (type: string)
          sort order: +
          Map-reduce partition columns: _col0 (type: string)
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
          value expressions: _col1 (type: bigint)
-          Group By Operator (GBY_6)
+          Group By Operator (GBY_5)
            aggregations: count(VALUE._col0)
            keys: KEY._col0 (type: string)
            mode: mergepartial
            outputColumnNames: _col0, _col1
            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator (FS_8)
+            File Output Operator (FS_7)
              compressed: false
              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
              table:
@@ -286,16 +286,16 @@
      expressions: key (type: string), value (type: string)
      outputColumnNames: _col0, _col1
      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-      Union (UNION_6)
+      Union (UNION_5)
        Statistics: Num rows: 2500 Data size: 26560 Basic stats: COMPLETE Column stats: NONE
-        File Output Operator (FS_8)
+        File Output Operator (FS_7)
          compressed: false
          Statistics: Num rows: 2500 Data size: 26560 Basic stats: COMPLETE Column stats: NONE
          table:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-null-subquery2:$hdt$_0-subquery2:$hdt$_0:srcpart
+null-subquery2:$hdt$_0-subquery2:srcpart
   TableScan (TS_2)
     alias: srcpart
    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
@@ -303,7 +303,7 @@
      expressions: key (type: string), value (type: string)
      outputColumnNames: _col0, _col1
      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-      Union (UNION_6)
+      Union (UNION_5)
        Statistics: Num rows: 2500 Data size: 26560 Basic stats: COMPLETE Column stats: NONE
 PREHOOK: query: EXPLAIN LOGICAL
@@ -357,11 +357,11 @@
 LOGICAL PLAN:
-$hdt$_0:$hdt$_0:s2
+$hdt$_0:s2
   TableScan (TS_0)
     alias: s2
    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_13)
+    Filter Operator (FIL_12)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
      Select Operator (SEL_2)
@@ -386,18 +386,18 @@
        expressions: _col3 (type: string), _col1 (type: string)
        outputColumnNames: _col0, _col1
        Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
-        File Output Operator (FS_12)
+        File Output Operator (FS_11)
          compressed: false
          Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
          table:
            input format: org.apache.hadoop.mapred.TextInputFormat
            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-$hdt$_0:$hdt$_1:s1
+$hdt$_1:s1
   TableScan (TS_3)
     alias: s1
    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_14)
+    Filter Operator (FIL_13)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
      Select Operator (SEL_4)
@@ -472,7 +472,7 @@
 LOGICAL PLAN:
-$hdt$_0:srcpart
+srcpart
   TableScan (TS_0)
     alias: srcpart
    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
@@ -480,7 +480,7 @@
      expressions: ds (type: string), key (type: string), value (type: string)
      outputColumnNames: _col0, _col1, _col2
      Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-      ListSink (OP_6)
+      ListSink (OP_5)
 PREHOOK: query: EXPLAIN LOGICAL
 SELECT * FROM V3
 PREHOOK: type: QUERY
@@ -503,23 +503,23 @@
 LOGICAL PLAN:
-$hdt$_0:$hdt$_0:srcpart
+$hdt$_0:srcpart
   TableScan (TS_0)
     alias: srcpart
    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_13)
+    Filter Operator (FIL_12)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
      Select Operator (SEL_2)
        expressions: key (type: string)
        outputColumnNames: _col0
        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_7)
+        Reduce Output Operator (RS_6)
          key expressions: _col0 (type: string)
          sort order: +
          Map-reduce partition columns: _col0 (type: string)
          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-          Join Operator (JOIN_10)
+          Join Operator (JOIN_9)
            condition map:
                 Inner Join 0 to 1
            keys:
@@ -527,11 +527,11 @@
              1 _col0 (type: string)
            outputColumnNames: _col0, _col2
            Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
-            Select Operator (SEL_11)
+            Select Operator (SEL_10)
              expressions: _col0 (type: string), _col2 (type: string)
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator (FS_12)
+              File Output Operator (FS_11)
                compressed: false
                Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                table:
@@ -539,23 +539,23 @@
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 $hdt$_1:src2
-  TableScan (TS_4)
+  TableScan (TS_3)
     alias: src2
    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_14)
+    Filter Operator (FIL_13)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-      Select Operator (SEL_5)
+      Select Operator (SEL_4)
        expressions: key (type: string), value (type: string)
        outputColumnNames: _col0, _col1
        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_9)
+        Reduce Output Operator (RS_8)
          key expressions: _col0 (type: string)
          sort order: +
          Map-reduce partition columns: _col0 (type: string)
          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
          value expressions: _col1 (type: string)
-          Join Operator (JOIN_10)
+          Join Operator (JOIN_9)
            condition map:
                 Inner Join 0 to 1
            keys:
@@ -585,24 +585,24 @@
 LOGICAL PLAN:
-$hdt$_0:$hdt$_0:$hdt$_0:srcpart
+$hdt$_0:srcpart
   TableScan (TS_0)
     alias: srcpart
    Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_18)
+    Filter Operator (FIL_16)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
      Select Operator (SEL_2)
        expressions: key (type: string), value (type: string)
        outputColumnNames: _col0, _col1
        Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_9)
+        Reduce Output Operator (RS_8)
          key expressions: _col0 (type: string)
          sort order: +
          Map-reduce partition columns: _col0 (type: string)
          Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
          value expressions: _col1 (type: string)
-          Join Operator (JOIN_14)
+          Join Operator (JOIN_13)
            condition map:
                 Inner Join 0 to 1
                 Inner Join 0 to 2
@@ -612,34 +612,34 @@
              2 _col0 (type: string)
            outputColumnNames: _col1, _col2, _col4
            Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-            Select Operator (SEL_15)
+            Select Operator (SEL_14)
              expressions: _col2 (type: string), _col1 (type: string), _col4 (type: string)
              outputColumnNames: _col0, _col1, _col2
              Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-              File Output Operator (FS_17)
+              File Output Operator (FS_15)
                compressed: false
                Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-$hdt$_0:$hdt$_1:src
-  TableScan (TS_4)
+$hdt$_1:src
+  TableScan (TS_3)
     alias: src
    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_19)
+    Filter Operator (FIL_17)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-      Select Operator (SEL_5)
+      Select Operator (SEL_4)
        expressions: key (type: string)
        outputColumnNames: _col0
        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_11)
+        Reduce Output Operator (RS_10)
          key expressions: _col0 (type: string)
          sort order: +
          Map-reduce partition columns: _col0 (type: string)
          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-          Join Operator (JOIN_14)
+          Join Operator (JOIN_13)
            condition map:
                 Inner Join 0 to 1
                 Inner Join 0 to 2
@@ -649,24 +649,24 @@
              2 _col0 (type: string)
            outputColumnNames: _col1, _col2, _col4
            Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
-$hdt$_0:$hdt$_2:src
-  TableScan (TS_6)
+$hdt$_2:src
+  TableScan (TS_5)
     alias: src
    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_20)
+    Filter Operator (FIL_18)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-      Select Operator (SEL_7)
+      Select Operator (SEL_6)
        expressions: key (type: string), value (type: string)
        outputColumnNames: _col0, _col1
        Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_13)
+        Reduce Output Operator (RS_12)
          key expressions: _col0 (type: string)
          sort order: +
          Map-reduce partition columns: _col0 (type: string)
          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
          value expressions: _col1 (type: string)
-          Join Operator (JOIN_14)
+          Join Operator (JOIN_13)
            condition map:
                 Inner Join 0 to 1
                 Inner Join 0 to 2
@@ -800,11 +800,11 @@
 LOGICAL PLAN:
-$hdt$_0:$hdt$_0:src
+$hdt$_0:src
   TableScan (TS_0)
     alias: src
    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_19)
+    Filter Operator (FIL_18)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
      Select Operator (SEL_1)
@@ -829,27 +829,27 @@
        expressions: _col0 (type: string), _col3 (type: bigint), _col1 (type: string)
        outputColumnNames: _col0, _col1, _col2
        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-        Reduce Output Operator (RS_16)
+        Reduce Output Operator (RS_15)
          key expressions: _col0 (type: string)
          sort order: +
          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
          value expressions: _col1 (type: bigint), _col2 (type: string)
-          Select Operator (SEL_17)
+          Select Operator (SEL_16)
            expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: string)
            outputColumnNames: _col0, _col1, _col2
            Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
-            File Output Operator (FS_18)
+            File Output Operator (FS_17)
              compressed: false
              Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
              table:
                input format: org.apache.hadoop.mapred.TextInputFormat
                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-$hdt$_0:$hdt$_1:$hdt$_1:src
+$hdt$_1:$hdt$_1:src
   TableScan (TS_2)
     alias: src
    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-    Filter Operator (FIL_20)
+    Filter Operator (FIL_19)
      predicate: key is not null (type: boolean)
      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
      Select Operator (SEL_3)
Index: ql/src/test/results/clientpositive/ppd_vc.q.out
===================================================================
--- ql/src/test/results/clientpositive/ppd_vc.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/ppd_vc.q.out	(working copy)
@@ -258,10 +258,10 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
+        /srcpart/ds=2008-04-09/hr=11 [srcpart]
+        /srcpart/ds=2008-04-09/hr=12 [srcpart]
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/auto_join14.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join14.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/auto_join14.q.out	(working copy)
@@ -28,11 +28,11 @@
   Stage: Stage-5
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:src
+        $hdt$_1:src
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:src
+        $hdt$_1:src
           TableScan
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/load_dyn_part14.q.out
===================================================================
--- ql/src/test/results/clientpositive/load_dyn_part14.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/load_dyn_part14.q.out	(working copy)
@@ -76,22 +76,22 @@
            Select Operator
              expressions: 'k1' (type: string), UDFToString(null) (type: string)
              outputColumnNames: _col0, _col1
-              Statistics: Num rows: 500 Data size: 135000 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 500 Data size: 85000 Basic stats: COMPLETE Column stats: COMPLETE
              Limit
                Number of rows: 2
-                Statistics: Num rows: 2 Data size: 540 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                Reduce Output Operator
                  sort order:
-                  Statistics: Num rows: 2 Data size: 540 Basic stats: COMPLETE Column stats: COMPLETE
+                  Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                  value expressions: _col0 (type: string), _col1 (type: string)
      Reduce Operator Tree:
        Select Operator
          expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
          outputColumnNames: _col0, _col1
-          Statistics: Num rows: 2 Data size: 540 Basic stats: COMPLETE Column stats: COMPLETE
+          Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
          Limit
            Number of rows: 2
-            Statistics: Num rows: 2 Data size: 540 Basic stats: COMPLETE Column stats: COMPLETE
+            Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
            File Output Operator
              compressed: false
              table:
@@ -104,10 +104,10 @@
      Map Operator Tree:
          TableScan
            Union
-              Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -115,10 +115,10 @@
                  name: default.nzhang_part14
          TableScan
            Union
-              Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -126,10 +126,10 @@
                  name: default.nzhang_part14
          TableScan
            Union
-              Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
+              Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
              File Output Operator
                compressed: false
-                Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
+                Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                table:
                  input format: org.apache.hadoop.mapred.TextInputFormat
                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Index: ql/src/test/results/clientpositive/union24.q.out
===================================================================
--- ql/src/test/results/clientpositive/union24.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/union24.q.out	(working copy)
@@ -262,7 +262,7 @@
             name: default.src5
           name: default.src5
       Truncated Path -> Alias:
-        /src5 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:src5]
+        /src5 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:src5]
       Needs Tagging: false
       Reduce Operator Tree:
        Group By Operator
Index: ql/src/test/results/clientpositive/table_access_keys_stats.q.out
===================================================================
--- ql/src/test/results/clientpositive/table_access_keys_stats.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/table_access_keys_stats.q.out	(working copy)
@@ -22,7 +22,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key
@@ -35,7 +35,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
@@ -50,7 +50,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key
@@ -63,7 +63,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key
@@ -77,7 +77,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key
@@ -90,7 +90,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
@@ -104,7 +104,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
@@ -130,7 +130,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key
@@ -148,11 +148,11 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_4
+Operator:GBY_3
 Table:default@t1
 Keys:key
-Operator:GBY_12
+Operator:GBY_10
 Table:default@t1
 Keys:key
@@ -218,7 +218,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_5
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
@@ -242,7 +242,7 @@
 PREHOOK: type: QUERY
 PREHOOK: Input: default@t1
 #### A masked pattern was here ####
-Operator:GBY_5
+Operator:GBY_3
 Table:default@t1
 Keys:key,val
Index: ql/src/test/results/clientpositive/auto_join_nulls.q.out
===================================================================
--- ql/src/test/results/clientpositive/auto_join_nulls.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/auto_join_nulls.q.out	(working copy)
@@ -34,7 +34,7 @@
 POSTHOOK: Input: default@myinput1
 #### A masked pattern was here ####
 13630578
-Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
+Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
 PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b
 PREHOOK: type: QUERY
 PREHOOK: Input: default@myinput1
Index: ql/src/test/results/clientpositive/louter_join_ppr.q.out
===================================================================
--- ql/src/test/results/clientpositive/louter_join_ppr.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/louter_join_ppr.q.out	(working copy)
@@ -1149,9 +1149,9 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /src [$hdt$_0:$hdt$_1:$hdt$_1:a]
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:$hdt$_0:b]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:$hdt$_0:b]
+        /src [$hdt$_1:$hdt$_1:a]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:b]
+        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:b]
       Needs Tagging: true
       Reduce Operator Tree:
        Join Operator
Index: ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out	(revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out	(working copy)
@@ -419,9 +419,9 @@
             name: default.fact_daily
           name: default.fact_daily
       Truncated Path -> Alias:
-        /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
-        /fact_tz/ds=1/x=238 [$hdt$_0:fact_daily]
-        /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
+        /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
+        /fact_tz/ds=1/x=238 [fact_daily]
+        /fact_tz/ds=1/x=484 [fact_daily]
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
=================================================================== --- ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out (working copy) @@ -489,7 +489,7 @@ name: default.fact_daily name: default.fact_daily Truncated Path -> Alias: - /fact_daily/ds=1/hr=1 [$hdt$_0:$hdt$_0:fact_daily] + /fact_daily/ds=1/hr=1 [$hdt$_0:fact_daily] Needs Tagging: false Reduce Operator Tree: Group By Operator Index: ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out =================================================================== --- ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out (working copy) @@ -3192,7 +3192,7 @@ Map Reduce Map Operator Tree: TableScan - alias: $hdt$_0:$hdt$_0:default.default__lineitem_ix_lineitem_ix_l_orderkey_idx__ + alias: $hdt$_0:default.default__lineitem_ix_lineitem_ix_l_orderkey_idx__ Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: l_orderkey (type: int), (l_orderkey + 1) (type: int), _count_of_l_orderkey (type: bigint) @@ -3282,7 +3282,7 @@ Map Reduce Map Operator Tree: TableScan - alias: $hdt$_0:$hdt$_0:default.default__lineitem_ix_lineitem_ix_l_orderkey_idx__ + alias: $hdt$_0:default.default__lineitem_ix_lineitem_ix_l_orderkey_idx__ Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: (l_orderkey + 2) (type: int), l_orderkey (type: int), _count_of_l_orderkey (type: bigint) Index: ql/src/test/results/clientpositive/subquery_notin.q.out =================================================================== --- ql/src/test/results/clientpositive/subquery_notin.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/subquery_notin.q.out (working copy) @@ -285,7 +285,7 @@ 199 val_199 199 val_199 2 val_2 -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- non agg, corr explain select p_mfgr, b.p_name, p_size @@ -321,44 +321,40 @@ TableScan alias: b Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) - outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string), _col2 (type: int) - sort order: ++ - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) + value expressions: p_name (type: string) Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1, _col2 + expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col1, _col2, _col5 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function 
definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: string, _col2: int + output shape: _col1: string, _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2 - partition by: _col1 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col2 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) isPivotResult: true Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_wcol0 <= 2) and (_col0 is null or _col1 is null)) (type: boolean) + predicate: ((_wcol0 <= 2) and (_col1 is null or _col2 is null)) (type: boolean) Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE @@ -480,37 +476,33 @@ TableScan alias: b Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int) - outputColumnNames: _col0, _col1, _col2 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col1 (type: string), _col2 (type: int) - sort order: ++ - Map-reduce partition columns: _col1 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string) + value expressions: p_name (type: string) Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1, _col2 + expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) + outputColumnNames: _col1, _col2, _col5 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: string, _col2: int + output shape: _col1: string, _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col2 - partition by: _col1 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col2 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) @@ -520,7 +512,7 @@ predicate: (_wcol0 <= 2) (type: boolean) Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string) + expressions: _col1 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -536,7 +528,7 @@ Processor Tree: ListSink -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name 
not in @@ -575,7 +567,7 @@ Manufacturer#5 almond antique blue firebrick mint 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -Warning: Shuffle Join JOIN[45][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[43][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: -- agg, non corr explain select p_name, p_size @@ -612,36 +604,32 @@ TableScan alias: part Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_mfgr (type: string), p_size (type: int) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 + outputColumnNames: _col2, _col5 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: int + output shape: _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 - partition by: _col0 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col1 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) @@ -651,7 +639,7 @@ predicate: (_wcol0 <= 2) (type: boolean) Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: int) + expressions: _col5 (type: int) outputColumnNames: _col0 Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -763,36 +751,32 @@ TableScan alias: part Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_mfgr (type: string), p_size (type: int) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 + outputColumnNames: _col2, _col5 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: int + output shape: _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: 
windowingtablefunction - order by: _col1 - partition by: _col0 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col1 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) @@ -802,7 +786,7 @@ predicate: (_wcol0 <= 2) (type: boolean) Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: int) + expressions: _col5 (type: int) outputColumnNames: _col0 Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -859,7 +843,7 @@ Processor Tree: ListSink -Warning: Shuffle Join JOIN[45][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product +Warning: Shuffle Join JOIN[43][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product PREHOOK: query: select p_name, p_size from part where part.p_size not in @@ -906,7 +890,7 @@ almond aquamarine yellow dodger mint 7 almond azure aquamarine papaya violet 12 almond azure blanched chiffon midnight 23 -Warning: Shuffle Join JOIN[42][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[40][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- agg, corr explain select p_mfgr, p_name, p_size @@ -942,36 +926,32 @@ TableScan alias: b Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_mfgr (type: string), p_size (type: int) - outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 + outputColumnNames: _col2, _col5 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: int + output shape: _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 - partition by: _col0 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col1 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) @@ -981,7 +961,7 @@ predicate: (_wcol0 <= 2) (type: boolean) Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: int) + expressions: _col2 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -1137,36 +1117,32 @@ TableScan alias: b Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: p_mfgr (type: string), p_size (type: int) - 
outputColumnNames: _col0, _col1 + Reduce Output Operator + key expressions: p_mfgr (type: string), p_size (type: int) + sort order: ++ + Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: int) - sort order: ++ - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 + outputColumnNames: _col2, _col5 Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE PTF Operator Function definitions: Input definition input alias: ptf_0 - output shape: _col0: string, _col1: int + output shape: _col2: string, _col5: int type: WINDOWING Windowing table definition input alias: ptf_1 name: windowingtablefunction - order by: _col1 - partition by: _col0 + order by: _col5 + partition by: _col2 raw input shape: window functions: window function definition alias: _wcol0 - arguments: _col1 + arguments: _col5 name: rank window function: GenericUDAFRankEvaluator window frame: PRECEDING(MAX)~FOLLOWING(MAX) @@ -1176,7 +1152,7 @@ predicate: (_wcol0 <= 2) (type: boolean) Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: int) + expressions: _col2 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE Group By Operator @@ -1226,7 +1202,7 @@ Processor Tree: ListSink -Warning: Shuffle Join JOIN[42][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[40][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) @@ -1267,7 +1243,7 @@ Manufacturer#5 almond azure blanched chiffon midnight 23 Manufacturer#5 almond antique blue firebrick mint 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 -Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: -- non agg, non corr, Group By in Parent Query select li.l_partkey, count(*) from lineitem li @@ -1466,7 +1442,7 @@ POSTHOOK: Input: default@t1_v POSTHOOK: Output: database:default POSTHOOK: Output: default@T2_v -Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: explain select * from T1_v where T1_v.key not in (select T2_v.key from T2_v) @@ -1611,7 +1587,7 @@ Processor Tree: ListSink -Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product PREHOOK: query: select * from T1_v where T1_v.key not in (select T2_v.key from T2_v) PREHOOK: type: QUERY Index: ql/src/test/results/clientpositive/subquery_views.q.out =================================================================== --- 
ql/src/test/results/clientpositive/subquery_views.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/subquery_views.q.out (working copy) @@ -69,8 +69,8 @@ POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@cv2 -Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[46][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product PREHOOK: query: explain select * from cv2 where cv2.key in (select key from cv2 c where c.key < '11') @@ -378,8 +378,8 @@ Processor Tree: ListSink -Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product +Warning: Shuffle Join JOIN[46][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product PREHOOK: query: select * from cv2 where cv2.key in (select key from cv2 c where c.key < '11') PREHOOK: type: QUERY Index: ql/src/test/results/clientpositive/auto_join_without_localtask.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join_without_localtask.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/auto_join_without_localtask.q.out (working copy) @@ -270,11 +270,11 @@ Stage: Stage-14 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_1:a + $hdt$_1:a Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_1:a + $hdt$_1:a TableScan alias: a Filter Operator @@ -319,11 +319,11 @@ Stage: Stage-12 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_2:a + $hdt$_2:a Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_2:a + $hdt$_2:a TableScan alias: a Filter Operator @@ -406,11 +406,11 @@ Local Work: Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$INTNAME + $INTNAME Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$INTNAME + $INTNAME TableScan Stage: Stage-2 @@ -457,11 +457,11 @@ Stage: Stage-15 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:a + $hdt$_0:a Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:a + $hdt$_0:a TableScan alias: a Filter Operator @@ -639,11 +639,11 @@ Stage: Stage-13 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_1:$hdt$_2:a + $hdt$_1:$hdt$_2:a Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_1:$hdt$_2:a + $hdt$_1:$hdt$_2:a TableScan alias: a Filter Operator @@ -714,11 +714,11 @@ Local Work: Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$INTNAME + $INTNAME Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$INTNAME + $INTNAME TableScan Stage: Stage-2 @@ -748,11 +748,11 @@ Stage: Stage-12 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:a + $hdt$_0:a Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:a + $hdt$_0:a TableScan alias: a Filter Operator @@ -836,11 +836,11 @@ Stage: Stage-14 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_1:$hdt$_1:a + $hdt$_1:$hdt$_1:a Fetch 
Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_1:$hdt$_1:a + $hdt$_1:$hdt$_1:a TableScan alias: a Filter Operator Index: ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out =================================================================== --- ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out (revision 1669497) +++ ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out (working copy) @@ -295,8 +295,8 @@ name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart] + /srcpart/ds=2008-04-08/hr=11 [srcpart] + /srcpart/ds=2008-04-08/hr=12 [srcpart] Stage: Stage-0 Move Operator @@ -650,8 +650,8 @@ name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart] - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart] + /srcpart/ds=2008-04-08/hr=11 [srcpart] + /srcpart/ds=2008-04-08/hr=12 [srcpart] Stage: Stage-7 Conditional Operator Index: ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out =================================================================== --- ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out (working copy) @@ -77,11 +77,11 @@ Stage: Stage-9 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$INTNAME1 + $INTNAME1 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$INTNAME1 + $INTNAME1 TableScan HashTable Sink Operator keys: @@ -114,11 +114,11 @@ Stage: Stage-10 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$INTNAME + $INTNAME Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$INTNAME + $INTNAME TableScan HashTable Sink Operator keys: @@ -188,11 +188,11 @@ Stage: Stage-11 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_1:$hdt$_1:lineitem + $hdt$_1:lineitem Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_1:$hdt$_1:lineitem + $hdt$_1:lineitem TableScan alias: lineitem Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE @@ -342,11 +342,11 @@ Stage: Stage-9 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$INTNAME1 + $INTNAME1 Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$INTNAME1 + $INTNAME1 TableScan HashTable Sink Operator keys: @@ -379,11 +379,11 @@ Stage: Stage-10 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$INTNAME + $INTNAME Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$INTNAME + $INTNAME TableScan HashTable Sink Operator keys: @@ -453,11 +453,11 @@ Stage: Stage-11 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_1:$hdt$_1:lineitem + $hdt$_1:lineitem Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_1:$hdt$_1:lineitem + $hdt$_1:lineitem TableScan alias: lineitem Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE Index: ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out (working copy) @@ -228,8 +228,8 @@ name: default.srcpart name: default.srcpart Truncated Path -> Alias: - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:src] - 
/srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:src] + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:src] + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src] Needs Tagging: false Reduce Operator Tree: Group By Operator Index: ql/src/test/results/clientpositive/leadlag.q.out =================================================================== --- ql/src/test/results/clientpositive/leadlag.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/leadlag.q.out (working copy) @@ -223,6 +223,8 @@ Manufacturer#5 almond aquamarine dodger light gainsboro 46 17 Manufacturer#5 almond azure blanched chiffon midnight 23 21 PREHOOK: query: -- 6. testRankInLead +-- disable cbo because of CALCITE-653 + select p_mfgr, p_name, p_size, r1, lead(r1,1,r1) over (distribute by p_mfgr sort by p_name) as deltaRank from ( @@ -234,6 +236,8 @@ PREHOOK: Input: default@part #### A masked pattern was here #### POSTHOOK: query: -- 6. testRankInLead +-- disable cbo because of CALCITE-653 + select p_mfgr, p_name, p_size, r1, lead(r1,1,r1) over (distribute by p_mfgr sort by p_name) as deltaRank from ( Index: ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out =================================================================== --- ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out (working copy) @@ -338,7 +338,7 @@ name: default.src name: default.src Truncated Path -> Alias: - /src [$hdt$_0:src] + /src [src] Needs Tagging: false Reduce Operator Tree: Select Operator Index: ql/src/test/results/clientpositive/auto_join11.q.out =================================================================== --- ql/src/test/results/clientpositive/auto_join11.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/auto_join11.q.out (working copy) @@ -23,11 +23,11 @@ Stage: Stage-5 Map Reduce Local Work Alias -> Map Local Tables: - $hdt$_0:$hdt$_0:$hdt$_0:src + $hdt$_0:$hdt$_0:src Fetch Operator limit: -1 Alias -> Map Local Operator Tree: - $hdt$_0:$hdt$_0:$hdt$_0:src + $hdt$_0:$hdt$_0:src TableScan alias: src Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE Index: ql/src/test/results/clientpositive/str_to_map.q.java1.7.out =================================================================== --- ql/src/test/results/clientpositive/str_to_map.q.java1.7.out (revision 1669497) +++ ql/src/test/results/clientpositive/str_to_map.q.java1.7.out (working copy) @@ -31,10 +31,10 @@ Select Operator expressions: str_to_map('a=1,b=2,c=3',',','=')['a'] (type: string) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 42500 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 3 - Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3 @@ -66,10 +66,10 @@ Select Operator expressions: str_to_map('a:1,b:2,c:3') (type: map) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 460000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 377000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 3 - Statistics: Num rows: 3 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE + 
Statistics: Num rows: 3 Data size: 2262 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3 @@ -101,10 +101,10 @@ Select Operator expressions: str_to_map('a:1,b:2,c:3',',',':') (type: map) outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 460000 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 500 Data size: 377000 Basic stats: COMPLETE Column stats: COMPLETE Limit Number of rows: 3 - Statistics: Num rows: 3 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 2262 Basic stats: COMPLETE Column stats: COMPLETE ListSink PREHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3 Index: ql/src/test/results/clientpositive/groupby_sort_1_23.q.out =================================================================== --- ql/src/test/results/clientpositive/groupby_sort_1_23.q.out (revision 1669497) +++ ql/src/test/results/clientpositive/groupby_sort_1_23.q.out (working copy) @@ -189,7 +189,7 @@ name: default.t1 name: default.t1 Truncated Path -> Alias: - /t1 [$hdt$_0:$hdt$_0:t1] + /t1 [$hdt$_0:t1] Stage: Stage-7 Conditional Operator @@ -526,7 +526,7 @@ name: default.t1 name: default.t1 Truncated Path -> Alias: - /t1 [$hdt$_0:$hdt$_0:t1] + /t1 [$hdt$_0:t1] Needs Tagging: false Reduce Operator Tree: Group By Operator @@ -784,7 +784,7 @@ name: default.t1 name: default.t1 Truncated Path -> Alias: - /t1 [$hdt$_0:$hdt$_0:t1] + /t1 [$hdt$_0:t1] Stage: Stage-7 Conditional Operator @@ -1192,7 +1192,7 @@ name: default.t1 name: default.t1 Truncated Path -> Alias: - /t1 [$hdt$_0:$hdt$_0:t1] + /t1 [$hdt$_0:t1] Stage: Stage-7 Conditional Operator @@ -1483,8 +1483,13 @@ STAGE DEPENDENCIES: Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 STAGE PLANS: Stage: Stage-1 @@ -1501,17 +1506,39 @@ Group By Operator aggregations: count(1) keys: 1 (type: int), _col1 (type: string) - mode: hash + mode: final outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: string) - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: string) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE - tag: -1 - value expressions: _col2 (type: bigint) - auto parallelism: false + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + GlobalTableId: 1 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key1,key2,cnt + columns.comments + columns.types int:int:int +#### A masked pattern was here #### + name default.outputtbl3 + serialization.ddl 
struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl3 + TotalFiles: 1 + GatherStats: true + MultiFileSpray: false Path -> Alias: #### A masked pattern was here #### Path -> Partition: @@ -1564,26 +1591,53 @@ name: default.t1 name: default.t1 Truncated Path -> Alias: - /t1 [$hdt$_0:$hdt$_0:t1] - Needs Tagging: false - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - keys: KEY._col0 (type: int), KEY._col1 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE + /t1 [$hdt$_0:t1] + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + tables: + replace: true +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key1,key2,cnt + columns.comments + columns.types int:int:int +#### A masked pattern was here #### + name default.outputtbl3 + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.outputtbl3 + + Stage: Stage-2 + Stats-Aggr Operator +#### A masked pattern was here #### + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + GatherStats: false File Output Operator compressed: false - GlobalTableId: 1 + GlobalTableId: 0 #### A masked pattern was here #### NumFilesPerFileSink: 1 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE -#### A masked pattern was here #### table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -1601,15 +1655,98 @@ serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe name: default.outputtbl3 TotalFiles: 1 - GatherStats: true + GatherStats: false MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: -ext-10001 + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key1,key2,cnt + columns.comments + columns.types int:int:int +#### A masked pattern was here #### + name default.outputtbl3 + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + bucket_count -1 + columns key1,key2,cnt + 
+              columns.comments
+              columns.types int:int:int
+#### A masked pattern was here ####
+              name default.outputtbl3
+              serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            name: default.outputtbl3
+          name: default.outputtbl3
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-          table:
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    columns key1,key2,cnt
+                    columns.comments
+                    columns.types int:int:int
+#### A masked pattern was here ####
+                    name default.outputtbl3
+                    serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl3
+      TotalFiles: 1
+      GatherStats: false
+      MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key1,key2,cnt
+              columns.comments
+              columns.types int:int:int
+#### A masked pattern was here ####
+              name default.outputtbl3
+              serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -1625,9 +1762,14 @@
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             name: default.outputtbl3
+          name: default.outputtbl3
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-2
-    Stats-Aggr Operator
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
@@ -1790,7 +1932,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -2011,7 +2153,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -2278,7 +2420,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [$hdt$_0:$hdt$_0:$hdt$_0:t1]
+        /t1 [$hdt$_0:$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -2626,7 +2768,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:t1, null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:t1]
+        /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:t1, null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1]
 
   Stage: Stage-7
     Conditional Operator
@@ -3051,7 +3193,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:t1]
+        /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -3247,7 +3389,7 @@
             name: default.t1
           name: default.t1
       Truncated Path -> Alias:
-        /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:t1]
+        /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:t1]
 #### A masked pattern was here ####
 
   Stage: Stage-8
@@ -4305,7 +4447,7 @@
             name: default.t2
           name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:$hdt$_0:t2]
+        /t2 [$hdt$_0:t2]
       Needs Tagging: false
       Reduce Operator Tree:
         Group By Operator
@@ -4453,8 +4595,13 @@
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -4471,17 +4618,44 @@
             Group By Operator
               aggregations: count(1)
               keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
-              mode: hash
+              mode: final
               outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-                sort order: +++
-                Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                tag: -1
-                value expressions: _col3 (type: bigint)
-                auto parallelism: false
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        COLUMN_STATS_ACCURATE true
+                        bucket_count -1
+                        columns key1,key2,key3,cnt
+                        columns.comments
+                        columns.types int:int:string:int
+#### A masked pattern was here ####
+                        name default.outputtbl4
+                        numFiles 1
+                        numRows 6
+                        rawDataSize 48
+                        serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        totalSize 54
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.outputtbl4
+                  TotalFiles: 1
+                  GatherStats: true
+                  MultiFileSpray: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -4534,26 +4708,58 @@
             name: default.t2
           name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:$hdt$_0:t2]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+        /t2 [$hdt$_0:t2]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key1,key2,key3,cnt
+                columns.comments
+                columns.types int:int:string:int
+#### A masked pattern was here ####
+                name default.outputtbl4
+                numFiles 1
+                numRows 6
+                rawDataSize 48
+                serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 54
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl4
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
             File Output Operator
               compressed: false
-              GlobalTableId: 1
+              GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4576,15 +4782,118 @@
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.outputtbl4
       TotalFiles: 1
-      GatherStats: true
+      GatherStats: false
       MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key1,key2,key3,cnt
+              columns.comments
+              columns.types int:int:string:int
+#### A masked pattern was here ####
+              name default.outputtbl4
+              numFiles 1
+              numRows 6
+              rawDataSize 48
+              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 54
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key1,key2,key3,cnt
+              columns.comments
+              columns.types int:int:string:int
+#### A masked pattern was here ####
+              name default.outputtbl4
+              numFiles 1
+              numRows 6
+              rawDataSize 48
+              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 54
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            name: default.outputtbl4
+          name: default.outputtbl4
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-          table:
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE true
+                    bucket_count -1
+                    columns key1,key2,key3,cnt
+                    columns.comments
+                    columns.types int:int:string:int
+#### A masked pattern was here ####
+                    name default.outputtbl4
+                    numFiles 1
+                    numRows 6
+                    rawDataSize 48
+                    serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 54
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl4
+      TotalFiles: 1
+      GatherStats: false
+      MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key1,key2,key3,cnt
+              columns.comments
+              columns.types int:int:string:int
+#### A masked pattern was here ####
+              name default.outputtbl4
+              numFiles 1
+              numRows 6
+              rawDataSize 48
+              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 54
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -4605,9 +4914,14 @@
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             name: default.outputtbl4
+          name: default.outputtbl4
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-2
-    Stats-Aggr Operator
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
@@ -4696,8 +5010,13 @@
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -4714,17 +5033,39 @@
             Group By Operator
               aggregations: count(1)
               keys: _col0 (type: string), 1 (type: int), _col2 (type: string), 2 (type: int)
-              mode: hash
+              mode: final
               outputColumnNames: _col0, _col1, _col2, _col3, _col4
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
-                sort order: ++++
-                Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                tag: -1
-                value expressions: _col4 (type: bigint)
-                auto parallelism: false
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3, _col4
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        bucket_count -1
+                        columns key1,key2,key3,key4,cnt
+                        columns.comments
+                        columns.types int:int:string:int:int
+#### A masked pattern was here ####
+                        name default.outputtbl5
+                        serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.outputtbl5
+                  TotalFiles: 1
+                  GatherStats: true
+                  MultiFileSpray: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -4777,26 +5118,53 @@
             name: default.t2
           name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:$hdt$_0:t2]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string), KEY._col3 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3, _col4
-          Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col3, _col4
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+        /t2 [$hdt$_0:t2]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                bucket_count -1
+                columns key1,key2,key3,key4,cnt
+                columns.comments
+                columns.types int:int:string:int:int
+#### A masked pattern was here ####
+                name default.outputtbl5
+                serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl5
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
             File Output Operator
               compressed: false
-              GlobalTableId: 1
+              GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -4814,15 +5182,98 @@
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.outputtbl5
       TotalFiles: 1
-      GatherStats: true
+      GatherStats: false
       MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key1,key2,key3,key4,cnt
+              columns.comments
+              columns.types int:int:string:int:int
+#### A masked pattern was here ####
+              name default.outputtbl5
+              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key1,key2,key3,key4,cnt
+              columns.comments
+              columns.types int:int:string:int:int
+#### A masked pattern was here ####
+              name default.outputtbl5
+              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            name: default.outputtbl5
+          name: default.outputtbl5
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-          table:
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    bucket_count -1
+                    columns key1,key2,key3,key4,cnt
+                    columns.comments
+                    columns.types int:int:string:int:int
+#### A masked pattern was here ####
+                    name default.outputtbl5
+                    serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl5
+      TotalFiles: 1
+      GatherStats: false
+      MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              bucket_count -1
+              columns key1,key2,key3,key4,cnt
+              columns.comments
+              columns.types int:int:string:int:int
+#### A masked pattern was here ####
+              name default.outputtbl5
+              serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -4838,9 +5289,14 @@
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             name: default.outputtbl5
+          name: default.outputtbl5
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-2
-    Stats-Aggr Operator
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5
@@ -4943,8 +5399,13 @@
 STAGE DEPENDENCIES:
   Stage-1 is a root stage
-  Stage-0 depends on stages: Stage-1
+  Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
+  Stage-4
+  Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
   Stage-2 depends on stages: Stage-0
+  Stage-3
+  Stage-5
+  Stage-6 depends on stages: Stage-5
 
 STAGE PLANS:
   Stage: Stage-1
@@ -4961,17 +5422,44 @@
             Group By Operator
               aggregations: count(1)
               keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
-              mode: hash
+              mode: final
              outputColumnNames: _col0, _col1, _col2, _col3
-              Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-                sort order: +++
-                Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
-                Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
-                tag: -1
-                value expressions: _col3 (type: bigint)
-                auto parallelism: false
+              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 1
+#### A masked pattern was here ####
+                  NumFilesPerFileSink: 1
+                  Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+#### A masked pattern was here ####
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      properties:
+                        COLUMN_STATS_ACCURATE true
+                        bucket_count -1
+                        columns key1,key2,key3,cnt
+                        columns.comments
+                        columns.types int:int:string:int
+#### A masked pattern was here ####
+                        name default.outputtbl4
+                        numFiles 1
+                        numRows 6
+                        rawDataSize 48
+                        serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                        serialization.format 1
+                        serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                        totalSize 54
+#### A masked pattern was here ####
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                      name: default.outputtbl4
+                  TotalFiles: 1
+                  GatherStats: true
+                  MultiFileSpray: false
       Path -> Alias:
 #### A masked pattern was here ####
       Path -> Partition:
@@ -5024,26 +5512,58 @@
             name: default.t2
           name: default.t2
       Truncated Path -> Alias:
-        /t2 [$hdt$_0:$hdt$_0:$hdt$_0:t2]
-      Needs Tagging: false
-      Reduce Operator Tree:
-        Group By Operator
-          aggregations: count(VALUE._col0)
-          keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
-          mode: mergepartial
-          outputColumnNames: _col0, _col1, _col2, _col3
-          Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-          Select Operator
-            expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
-            outputColumnNames: _col0, _col1, _col2, _col3
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
+        /t2 [$hdt$_0:t2]
+
+  Stage: Stage-7
+    Conditional Operator
+
+  Stage: Stage-4
+    Move Operator
+      files:
+          hdfs directory: true
+#### A masked pattern was here ####
+
+  Stage: Stage-0
+    Move Operator
+      tables:
+          replace: true
+#### A masked pattern was here ####
+          table:
+              input format: org.apache.hadoop.mapred.TextInputFormat
+              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+              properties:
+                COLUMN_STATS_ACCURATE true
+                bucket_count -1
+                columns key1,key2,key3,cnt
+                columns.comments
+                columns.types int:int:string:int
+#### A masked pattern was here ####
+                name default.outputtbl4
+                numFiles 1
+                numRows 6
+                rawDataSize 48
+                serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                serialization.format 1
+                serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                totalSize 54
+#### A masked pattern was here ####
+              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              name: default.outputtbl4
+
+  Stage: Stage-2
+    Stats-Aggr Operator
+#### A masked pattern was here ####
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
             File Output Operator
               compressed: false
-              GlobalTableId: 1
+              GlobalTableId: 0
 #### A masked pattern was here ####
               NumFilesPerFileSink: 1
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
-#### A masked pattern was here ####
               table:
                   input format: org.apache.hadoop.mapred.TextInputFormat
                   output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
@@ -5066,15 +5586,118 @@
               serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
               name: default.outputtbl4
       TotalFiles: 1
-      GatherStats: true
+      GatherStats: false
       MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key1,key2,key3,cnt
+              columns.comments
+              columns.types int:int:string:int
+#### A masked pattern was here ####
+              name default.outputtbl4
+              numFiles 1
+              numRows 6
+              rawDataSize 48
+              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 54
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key1,key2,key3,cnt
+              columns.comments
+              columns.types int:int:string:int
+#### A masked pattern was here ####
+              name default.outputtbl4
+              numFiles 1
+              numRows 6
+              rawDataSize 48
+              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 54
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+            name: default.outputtbl4
+          name: default.outputtbl4
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-0
-    Move Operator
-      tables:
-          replace: true
+  Stage: Stage-5
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            GatherStats: false
+            File Output Operator
+              compressed: false
+              GlobalTableId: 0
 #### A masked pattern was here ####
-          table:
+              NumFilesPerFileSink: 1
+              table:
+                  input format: org.apache.hadoop.mapred.TextInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                  properties:
+                    COLUMN_STATS_ACCURATE true
+                    bucket_count -1
+                    columns key1,key2,key3,cnt
+                    columns.comments
+                    columns.types int:int:string:int
+#### A masked pattern was here ####
+                    name default.outputtbl4
+                    numFiles 1
+                    numRows 6
+                    rawDataSize 48
+                    serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+                    serialization.format 1
+                    serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                    totalSize 54
+#### A masked pattern was here ####
+                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                  name: default.outputtbl4
+      TotalFiles: 1
+      GatherStats: false
+      MultiFileSpray: false
+      Path -> Alias:
+#### A masked pattern was here ####
+      Path -> Partition:
+#### A masked pattern was here ####
+          Partition
+            base file name: -ext-10001
+            input format: org.apache.hadoop.mapred.TextInputFormat
+            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+            properties:
+              COLUMN_STATS_ACCURATE true
+              bucket_count -1
+              columns key1,key2,key3,cnt
+              columns.comments
+              columns.types int:int:string:int
+#### A masked pattern was here ####
+              name default.outputtbl4
+              numFiles 1
+              numRows 6
+              rawDataSize 48
+              serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
+              serialization.format 1
+              serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+              totalSize 54
+#### A masked pattern was here ####
+            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
             input format: org.apache.hadoop.mapred.TextInputFormat
             output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
             properties:
@@ -5382,9 +6176,14 @@
 #### A masked pattern was here ####
             serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
             name: default.outputtbl4
+          name: default.outputtbl4
+      Truncated Path -> Alias:
+#### A masked pattern was here ####
 
-  Stage: Stage-2
-    Stats-Aggr Operator
+  Stage: Stage-6
+    Move Operator
+      files:
+          hdfs directory: true
 #### A masked pattern was here ####
 
 PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
Index: ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
===================================================================
--- ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out (revision 1669497)
+++ ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out (working copy)
@@ -20,20 +20,16 @@
             alias: src
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
             Select Operator
-              expressions: (UDFToDouble(key) + 1.0) (type: double)
-              outputColumnNames: _col0
+              expressions: (UDFToDouble(key) + 1.0) (type: double), ((UDFToDouble(key) + 1.0) + 1.0) (type: double)
+              outputColumnNames: _col0, _col1
               Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Select Operator
-                expressions: _col0 (type: double), (_col0 + 1.0) (type: double)
-                outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
                 Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-                  table:
-                      input format: org.apache.hadoop.mapred.TextInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
 
   Stage: Stage-0
     Fetch Operator
Index: ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_7.q.out (revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_7.q.out (working copy)
@@ -241,8 +241,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-0
     Move Operator
@@ -596,8 +596,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-7
     Conditional Operator
Index: ql/src/test/results/clientpositive/join31.q.out
===================================================================
--- ql/src/test/results/clientpositive/join31.q.out (revision 1669497)
+++ ql/src/test/results/clientpositive/join31.q.out (working copy)
@@ -84,11 +84,11 @@
   Stage: Stage-9
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$INTNAME1
+        $hdt$_0:$INTNAME1
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$INTNAME1
+        $hdt$_0:$INTNAME1
           TableScan
             HashTable Sink Operator
               keys:
@@ -169,11 +169,11 @@
   Stage: Stage-10
     Map Reduce Local Work
      Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$INTNAME
+        $hdt$_0:$INTNAME
          Fetch Operator
            limit: -1
      Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$INTNAME
+        $hdt$_0:$INTNAME
          TableScan
            HashTable Sink Operator
              keys:
Index: ql/src/test/results/clientpositive/join32_lessSize.q.out
===================================================================
--- ql/src/test/results/clientpositive/join32_lessSize.q.out (revision 1669497)
+++ ql/src/test/results/clientpositive/join32_lessSize.q.out (working copy)
@@ -119,11 +119,11 @@
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -279,16 +279,16 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:z]
+        /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]
 
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:y
+        $hdt$_0:y
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:y
+        $hdt$_0:y
           TableScan
             alias: y
             Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -666,11 +666,11 @@
   Stage: Stage-11
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:$hdt$_2:$hdt$_2:x
+        $hdt$_1:$hdt$_2:$hdt$_2:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:$hdt$_2:$hdt$_2:x
+        $hdt$_1:$hdt$_2:$hdt$_2:x
           TableScan
             alias: x
             Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -780,16 +780,16 @@
             name: default.src1
           name: default.src1
       Truncated Path -> Alias:
-        /src1 [$hdt$_0:$hdt$_1:$hdt$_2:$hdt$_3:x]
+        /src1 [$hdt$_1:$hdt$_2:$hdt$_3:x]
 
   Stage: Stage-10
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:$hdt$_1:w
+        $hdt$_1:$hdt$_1:w
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:$hdt$_1:w
+        $hdt$_1:$hdt$_1:w
          TableScan
            alias: w
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -914,11 +914,11 @@
   Stage: Stage-9
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:w
+        $hdt$_0:w
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:w
+        $hdt$_0:w
          TableScan
            alias: w
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1319,11 +1319,11 @@
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
          TableScan
            alias: x
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -1481,12 +1481,12 @@
             name: default.src1
           name: default.src1
       Truncated Path -> Alias:
-        /src [$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:y]
+        /src [$hdt$_1:$hdt$_1:y]
 
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:z
+        $hdt$_0:z
           Fetch Operator
             limit: -1
       Partition Description:
@@ -1536,7 +1536,7 @@
             name: default.srcpart
           name: default.srcpart
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:z
+        $hdt$_0:z
          TableScan
            alias: z
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -1931,11 +1931,11 @@
   Stage: Stage-9
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:y
+        $hdt$_0:$hdt$_0:y
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:y
+        $hdt$_0:$hdt$_0:y
          TableScan
            alias: y
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -2089,7 +2089,7 @@
             name: default.src1
           name: default.src1
       Truncated Path -> Alias:
-        /src1 [$hdt$_0:$hdt$_0:$hdt$_1:x]
+        /src1 [$hdt$_0:$hdt$_1:x]
 
   Stage: Stage-8
     Map Reduce Local Work
@@ -2469,11 +2469,11 @@
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
          TableScan
            alias: x
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -2526,11 +2526,11 @@
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:x
+        $hdt$_0:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:x
+        $hdt$_0:x
          TableScan
            alias: x
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
@@ -2726,11 +2726,11 @@
   Stage: Stage-8
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
+        $hdt$_1:$hdt$_2:x
          TableScan
            alias: x
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
@@ -2783,11 +2783,11 @@
   Stage: Stage-7
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:y
+        $hdt$_0:y
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:y
+        $hdt$_0:y
          TableScan
            alias: y
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
===================================================================
--- ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out (revision 1669497)
+++ ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out (working copy)
@@ -207,46 +207,42 @@
           TableScan
             alias: part2
             Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-            Select Operator
-              expressions: p2_mfgr (type: string), p2_size (type: int)
-              outputColumnNames: _col0, _col1
+            Reduce Output Operator
+              key expressions: p2_mfgr (type: string), p2_size (type: int)
+              sort order: ++
+              Map-reduce partition columns: p2_mfgr (type: string)
               Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: int)
-                sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
       Reduce Operator Tree:
         Select Operator
           expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col2, _col5
           Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
          PTF Operator
            Function definitions:
                Input definition
                  input alias: ptf_0
-                  output shape: _col0: string, _col1: int
+                  output shape: _col2: string, _col5: int
                  type: WINDOWING
                Windowing table definition
                  input alias: ptf_1
                  name: windowingtablefunction
-                  order by: _col1
-                  partition by: _col0
+                  order by: _col5
+                  partition by: _col2
                  raw input shape:
                  window functions:
                      window function definition
                        alias: _wcol0
-                        arguments: _col1
+                        arguments: _col5
                        name: rank
                        window function: GenericUDAFRankEvaluator
                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                        isPivotResult: true
            Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Filter Operator
-              predicate: ((_wcol0 <= 2) and _col0 is not null) (type: boolean)
+              predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean)
              Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
              Select Operator
-                expressions: _col0 (type: string), _col1 (type: int)
+                expressions: _col2 (type: string), _col5 (type: int)
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                Group By Operator
@@ -379,46 +375,42 @@
          TableScan
            alias: b
            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_mfgr (type: string), p_size (type: int)
-              outputColumnNames: _col0, _col1
+            Reduce Output Operator
+              key expressions: p_mfgr (type: string), p_size (type: int)
+              sort order: ++
+              Map-reduce partition columns: p_mfgr (type: string)
              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: int)
-                sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
      Reduce Operator Tree:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
-          outputColumnNames: _col0, _col1
+          outputColumnNames: _col2, _col5
          Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
          PTF Operator
            Function definitions:
                Input definition
                  input alias: ptf_0
-                  output shape: _col0: string, _col1: int
+                  output shape: _col2: string, _col5: int
                  type: WINDOWING
                Windowing table definition
                  input alias: ptf_1
                  name: windowingtablefunction
-                  order by: _col1
-                  partition by: _col0
+                  order by: _col5
+                  partition by: _col2
                  raw input shape:
                  window functions:
                      window function definition
                        alias: _wcol0
-                        arguments: _col1
+                        arguments: _col5
                        name: rank
                        window function: GenericUDAFRankEvaluator
                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                        isPivotResult: true
            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: ((_wcol0 <= 2) and _col0 is not null) (type: boolean)
+              predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean)
              Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
              Select Operator
-                expressions: _col0 (type: string), _col1 (type: int)
+                expressions: _col2 (type: string), _col5 (type: int)
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
                Group By Operator
@@ -789,7 +781,7 @@
        Processor Tree:
          ListSink
 
-Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
+Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
 PREHOOK: query: -- non agg, corr
 explain
 select p_mfgr, b.p_name, p_size
@@ -825,44 +817,40 @@
          TableScan
            alias: b
            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
-              outputColumnNames: _col0, _col1, _col2
+            Reduce Output Operator
+              key expressions: p_mfgr (type: string), p_size (type: int)
+              sort order: ++
+              Map-reduce partition columns: p_mfgr (type: string)
              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col1 (type: string), _col2 (type: int)
-                sort order: ++
-                Map-reduce partition columns: _col1 (type: string)
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string)
+              value expressions: p_name (type: string)
      Reduce Operator Tree:
        Select Operator
-          expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
-          outputColumnNames: _col0, _col1, _col2
+          expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
+          outputColumnNames: _col1, _col2, _col5
          Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
          PTF Operator
            Function definitions:
                Input definition
                  input alias: ptf_0
-                  output shape: _col0: string, _col1: string, _col2: int
+                  output shape: _col1: string, _col2: string, _col5: int
                  type: WINDOWING
                Windowing table definition
                  input alias: ptf_1
                  name: windowingtablefunction
-                  order by: _col2
-                  partition by: _col1
+                  order by: _col5
+                  partition by: _col2
                  raw input shape:
                  window functions:
                      window function definition
                        alias: _wcol0
-                        arguments: _col2
+                        arguments: _col5
                        name: rank
                        window function: GenericUDAFRankEvaluator
                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                        isPivotResult: true
            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Filter Operator
-              predicate: ((_wcol0 <= 2) and (_col0 is null or _col1 is null)) (type: boolean)
+              predicate: ((_wcol0 <= 2) and (_col1 is null or _col2 is null)) (type: boolean)
              Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
              Select Operator
                Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
@@ -984,37 +972,33 @@
          TableScan
            alias: b
            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
-              outputColumnNames: _col0, _col1, _col2
+            Reduce Output Operator
+              key expressions: p_mfgr (type: string), p_size (type: int)
+              sort order: ++
+              Map-reduce partition columns: p_mfgr (type: string)
              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col1 (type: string), _col2 (type: int)
-                sort order: ++
-                Map-reduce partition columns: _col1 (type: string)
-                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: string)
+              value expressions: p_name (type: string)
      Reduce Operator Tree:
        Select Operator
-          expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
-          outputColumnNames: _col0, _col1, _col2
+          expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
+          outputColumnNames: _col1, _col2, _col5
          Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
          PTF Operator
            Function definitions:
                Input definition
                  input alias: ptf_0
-                  output shape: _col0: string, _col1: string, _col2: int
+                  output shape: _col1: string, _col2: string, _col5: int
                  type: WINDOWING
                Windowing table definition
                  input alias: ptf_1
                  name: windowingtablefunction
-                  order by: _col2
-                  partition by: _col1
+                  order by: _col5
+                  partition by: _col2
                  raw input shape:
                  window functions:
                      window function definition
                        alias: _wcol0
-                        arguments: _col2
+                        arguments: _col5
                        name: rank
                        window function: GenericUDAFRankEvaluator
                        window frame: PRECEDING(MAX)~FOLLOWING(MAX)
@@ -1024,7 +1008,7 @@
              predicate: (_wcol0 <= 2) (type: boolean)
              Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
              Select Operator
-                expressions: _col0 (type: string), _col1 (type: string)
+                expressions: _col1 (type: string), _col2 (type: string)
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
Index: ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out (revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out (working copy)
@@ -231,8 +231,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-0
     Move Operator
Index: ql/src/test/results/clientpositive/correlationoptimizer1.q.out
===================================================================
--- ql/src/test/results/clientpositive/correlationoptimizer1.q.out (revision 1669497)
+++ ql/src/test/results/clientpositive/correlationoptimizer1.q.out (working copy)
@@ -329,11 +329,11 @@
   Stage: Stage-6
     Map Reduce Local Work
       Alias -> Map Local Tables:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
+        $hdt$_0:$hdt$_0:$hdt$_1:x
           Fetch Operator
             limit: -1
       Alias -> Map Local Operator Tree:
-        $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
+        $hdt$_0:$hdt$_0:$hdt$_1:x
          TableScan
            alias: x
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
Index: ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out (revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out (working copy)
@@ -239,8 +239,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-0
     Move Operator
@@ -534,8 +534,8 @@
             name: default.srcpart
           name: default.srcpart
       Truncated Path -> Alias:
-        /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
-        /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
+        /srcpart/ds=2008-04-08/hr=11 [srcpart]
+        /srcpart/ds=2008-04-08/hr=12 [srcpart]
 
   Stage: Stage-7
     Conditional Operator
Index: ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
===================================================================
--- ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out (revision 1669497)
+++ ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out (working copy)
@@ -166,7 +166,7 @@
             name: default.src
           name: default.src
       Truncated Path -> Alias:
-        /src [$hdt$_0:src]
+        /src [src]
 
   Stage: Stage-0
     Move Operator
Index: ql/src/test/results/clientpositive/vector_decimal_round.q.out
===================================================================
--- ql/src/test/results/clientpositive/vector_decimal_round.q.out (revision 1669497)
+++ ql/src/test/results/clientpositive/vector_decimal_round.q.out (working copy)
@@ -102,17 +102,17 @@
            alias: decimal_tbl_txt
            Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-              expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
-              outputColumnNames: _col0, _col1
+              expressions: dec (type: decimal(10,0))
+              outputColumnNames: _col0
              Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: round(_col0, (- 1)) (type: decimal(11,0))
                sort order: +
                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: decimal(10,0)), _col1 (type: decimal(11,0))
+                value expressions: _col0 (type: decimal(10,0))
      Reduce Operator Tree:
        Select Operator
-          expressions: VALUE._col0 (type: decimal(10,0)), VALUE._col1 (type: decimal(11,0))
+          expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
          outputColumnNames: _col0, _col1
          Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
          File Output Operator
@@ -238,17 +238,17 @@
            alias: decimal_tbl_rc
            Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-              expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
-              outputColumnNames: _col0, _col1
+              expressions: dec (type: decimal(10,0))
+              outputColumnNames: _col0
              Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: round(_col0, (- 1)) (type: decimal(11,0))
                sort order: +
                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: decimal(10,0)), _col1 (type: decimal(11,0))
+                value expressions: _col0 (type: decimal(10,0))
      Reduce Operator Tree:
        Select Operator
-          expressions: VALUE._col0 (type: decimal(10,0)), VALUE._col1 (type: decimal(11,0))
+          expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
          outputColumnNames: _col0, _col1
          Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
          File Output Operator
@@ -375,18 +375,18 @@
            alias: decimal_tbl_orc
            Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
            Select Operator
-              expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
-              outputColumnNames: _col0, _col1
+              expressions: dec (type: decimal(10,0))
+              outputColumnNames: _col0
              Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
              Reduce Output Operator
                key expressions: round(_col0, (- 1)) (type: decimal(11,0))
                sort order: +
                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
-                value expressions: _col0 (type: decimal(10,0)), _col1 (type: decimal(11,0))
+                value expressions: _col0 (type: decimal(10,0))
      Execution mode: vectorized
      Reduce Operator Tree:
        Select Operator
-          expressions: VALUE._col0 (type: decimal(10,0)), VALUE._col1 (type: decimal(11,0))
+          expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
          outputColumnNames: _col0, _col1
          Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
          File Output Operator
Index: ql/src/test/results/clientpositive/ctas_colname.q.out
===================================================================
--- ql/src/test/results/clientpositive/ctas_colname.q.out (revision 1669497)
+++ ql/src/test/results/clientpositive/ctas_colname.q.out (working copy)
@@ -174,15 +174,11 @@
          TableScan
            alias: src1
            Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+            Reduce Output Operator
+              key expressions: key (type: string), value (type: string)
+              sort order: ++
+              Map-reduce partition columns: key (type: string)
              Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
-                sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
      Reduce Operator Tree:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
@@ -340,15 +336,11 @@
          TableScan
            alias: src
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-            Select Operator
-              expressions: key (type: string), value (type: string)
-              outputColumnNames: _col0, _col1
+            Reduce Output Operator
+              key expressions: key (type: string), value (type: string)
+              sort order: ++
+              Map-reduce partition columns: key (type: string)
              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
-              Reduce Output Operator
-                key expressions: _col0 (type: string), _col1 (type: string)
-                sort order: ++
-                Map-reduce partition columns: _col0 (type: string)
-                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
      Reduce Operator Tree:
        Select Operator
          expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
Index: ql/src/test/queries/clientpositive/leadlag.q
===================================================================
--- ql/src/test/queries/clientpositive/leadlag.q (revision 1669497)
+++ ql/src/test/queries/clientpositive/leadlag.q (working copy)
@@ -36,7 +36,10 @@
 from part
 window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) ;
 
+set hive.cbo.enable=false;
 -- 6. testRankInLead
+-- disable cbo because of CALCITE-653
+
 select  p_mfgr, p_name, p_size, r1,
 lead(r1,1,r1) over (distribute by p_mfgr sort by p_name) as deltaRank
 from (
@@ -45,6 +48,7 @@
 from part
 ) a;
 
+set hive.cbo.enable=true;
 -- 7.
testLeadWithPTF select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java (revision 1669497) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java (working copy) @@ -38,6 +38,9 @@ import org.apache.calcite.rel.rules.MultiJoin; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexCall; +import org.apache.calcite.rex.RexInputRef; import org.apache.calcite.rex.RexNode; import org.apache.calcite.sql.SqlAggFunction; import org.apache.calcite.sql.SqlKind; @@ -168,7 +171,27 @@ ImmutableMap.Builder inputRefToCallMapBldr = ImmutableMap.builder(); for (int i = resultSchema.size(); i < rt.getFieldCount(); i++) { if (collationInputRefs.contains(i)) { - inputRefToCallMapBldr.put(i, obChild.getChildExps().get(i)); + RexNode obyExpr = obChild.getChildExps().get(i); + if (obyExpr instanceof RexCall) { + int a = -1; + List operands = new ArrayList<>(); + for (int k = 0; k< ((RexCall) obyExpr).operands.size(); k++) { + RexNode rn = ((RexCall) obyExpr).operands.get(k); + for (int j = 0; j < resultSchema.size(); j++) { + if( obChild.getChildExps().get(j).toString().equals(rn.toString())) { + a = j; + break; + } + } if (a != -1) { + operands.add(new RexInputRef(a, rn.getType())); + } else { + operands.add(rn); + } + a = -1; + } + obyExpr = obChild.getCluster().getRexBuilder().makeCall(((RexCall)obyExpr).getOperator(), operands); + } + inputRefToCallMapBldr.put(i, obyExpr); } } ImmutableMap inputRefToCallMap = inputRefToCallMapBldr.build(); @@ -266,7 +289,7 @@ RelNode select = introduceDerivedTable(rel); parent.replaceInput(pos, select); - + return select; } @@ -352,7 +375,7 @@ return validChild; } - + private static boolean isEmptyGrpAggr(RelNode gbNode) { // Verify if both groupset and aggrfunction are empty) Aggregate aggrnode = (Aggregate) gbNode; @@ -361,12 +384,12 @@ } return false; } - + private static void replaceEmptyGroupAggr(final RelNode rel, RelNode parent) { // If this function is called, the parent should only include constant List exps = parent.getChildExps(); for (RexNode rexNode : exps) { - if (rexNode.getKind() != SqlKind.LITERAL) { + if (!rexNode.accept(new HiveCalciteUtil.ConstantFinder())) { throw new RuntimeException("We expect " + parent.toString() + " to contain only constants. However, " + rexNode.toString() + " is " + rexNode.getKind()); @@ -377,7 +400,7 @@ RelDataType longType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, typeFactory); RelDataType intType = TypeConverter.convert(TypeInfoFactory.intTypeInfo, typeFactory); // Create the dummy aggregation. - SqlAggFunction countFn = (SqlAggFunction) SqlFunctionConverter.getCalciteAggFn("count", + SqlAggFunction countFn = SqlFunctionConverter.getCalciteAggFn("count", ImmutableList.of(intType), longType); // TODO: Using 0 might be wrong; might need to walk down to find the // proper index of a dummy. 
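For context on the PlanModifierForASTConv hunk above: before the Calcite plan is converted back to an AST, each operand of an ORDER BY RexCall is compared by its string form against the child project's expressions, and a matching operand is replaced with a RexInputRef into that projection, so the sort key references a projected column instead of repeating the expression. The standalone sketch below mirrors only that matching step, using plain strings in place of RexNode trees; the names ObExprRewriteSketch and rewriteOperands are hypothetical and purely illustrative, not part of the patch.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ObExprRewriteSketch {
  // For each operand of an ORDER BY "call": if its string form matches a
  // projected expression, substitute a positional reference ("$<index>"),
  // analogous to the patch substituting a RexInputRef for the operand.
  static List<String> rewriteOperands(List<String> projected, List<String> operands) {
    List<String> rewritten = new ArrayList<>();
    for (String operand : operands) {
      int match = -1;
      for (int j = 0; j < projected.size(); j++) {
        if (projected.get(j).equals(operand)) { // textual match, like rn.toString()
          match = j;
          break;
        }
      }
      rewritten.add(match != -1 ? "$" + match : operand); // input ref vs. original operand
    }
    return rewritten;
  }

  public static void main(String[] args) {
    List<String> projected = Arrays.asList("p_mfgr", "p_size");
    // Only operands that appear in the projection become input refs.
    System.out.println(rewriteOperands(projected, Arrays.asList("p_size", "p_retailprice")));
    // prints [$1, p_retailprice]
  }
}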
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java	(revision 1669497)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java	(working copy)
@@ -32,8 +32,16 @@
 import org.apache.calcite.rel.core.Sort;
 import org.apache.calcite.rel.type.RelDataTypeField;
 import org.apache.calcite.rex.RexBuilder;
+import org.apache.calcite.rex.RexCall;
+import org.apache.calcite.rex.RexCorrelVariable;
+import org.apache.calcite.rex.RexDynamicParam;
+import org.apache.calcite.rex.RexFieldAccess;
 import org.apache.calcite.rex.RexInputRef;
+import org.apache.calcite.rex.RexLiteral;
+import org.apache.calcite.rex.RexLocalRef;
 import org.apache.calcite.rex.RexNode;
+import org.apache.calcite.rex.RexOver;
+import org.apache.calcite.rex.RexRangeRef;
 import org.apache.calcite.rex.RexVisitor;
 import org.apache.calcite.rex.RexVisitorImpl;
 import org.apache.calcite.sql.SqlKind;
@@ -535,6 +543,7 @@
     boolean deterministic = true;
 
     RexVisitor<Void> visitor = new RexVisitorImpl<Void>(true) {
+      @Override
       public Void visitCall(org.apache.calcite.rex.RexCall call) {
         if (!call.getOperator().isDeterministic()) {
           throw new Util.FoundOne(call);
@@ -551,4 +560,59 @@
 
     return deterministic;
   }
+
+  /**
+   * Walks over an expression and determines whether it is constant.
+   */
+  public static class ConstantFinder implements RexVisitor<Boolean> {
+
+    @Override
+    public Boolean visitLiteral(RexLiteral literal) {
+      return true;
+    }
+
+    @Override
+    public Boolean visitInputRef(RexInputRef inputRef) {
+      return false;
+    }
+
+    @Override
+    public Boolean visitLocalRef(RexLocalRef localRef) {
+      throw new RuntimeException("Not expected to be called.");
+    }
+
+    @Override
+    public Boolean visitOver(RexOver over) {
+      return false;
+    }
+
+    @Override
+    public Boolean visitCorrelVariable(RexCorrelVariable correlVariable) {
+      return false;
+    }
+
+    @Override
+    public Boolean visitDynamicParam(RexDynamicParam dynamicParam) {
+      return false;
+    }
+
+    @Override
+    public Boolean visitCall(RexCall call) {
+      // Constant if operator is deterministic and all operands are
+      // constant.
+      return call.getOperator().isDeterministic()
+          && RexVisitorImpl.visitArrayAnd(this, call.getOperands());
+    }
+
+    @Override
+    public Boolean visitRangeRef(RexRangeRef rangeRef) {
+      return false;
+    }
+
+    @Override
+    public Boolean visitFieldAccess(RexFieldAccess fieldAccess) {
+      // "<expr>.FIELD" is constant iff "<expr>" is constant.
+      return fieldAccess.getReferenceExpr().accept(this);
+    }
+  }
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java	(revision 1669497)
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java	(working copy)
@@ -69,6 +69,7 @@
 import org.apache.calcite.rel.rules.JoinPushTransitivePredicatesRule;
 import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
 import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
+import org.apache.calcite.rel.rules.ProjectMergeRule;
 import org.apache.calcite.rel.rules.ProjectRemoveRule;
 import org.apache.calcite.rel.rules.ReduceExpressionsRule;
 import org.apache.calcite.rel.rules.SemiJoinFilterTransposeRule;
@@ -721,6 +722,7 @@
       hepPgmBldr.addRuleInstance(ReduceExpressionsRule.PROJECT_INSTANCE);
       hepPgmBldr.addRuleInstance(ProjectRemoveRule.INSTANCE);
       hepPgmBldr.addRuleInstance(UnionMergeRule.INSTANCE);
+      hepPgmBldr.addRuleInstance(new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY));
 
       hepPgm = hepPgmBldr.build();
       HepPlanner hepPlanner = new HepPlanner(hepPgm);
@@ -785,8 +787,6 @@
     // 3. Transitive inference & Partition Pruning
     basePlan = hepPlan(basePlan, false, mdProvider, new JoinPushTransitivePredicatesRule(
         Join.class, HiveFilter.DEFAULT_FILTER_FACTORY),
-        // TODO: Enable it after CALCITE-407 is fixed
-        // RemoveTrivialProjectRule.INSTANCE,
         new HivePartitionPruneRule(conf));
 
     // 4. Projection Pruning
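For context on the HiveCalciteUtil and CalcitePlanner hunks above: replaceEmptyGroupAggr now accepts any constant expression rather than only bare literals, because ConstantFinder declares a RexCall constant iff its operator is deterministic and every operand is itself constant (literals are constant; input refs, windowed aggregates, correlation variables, and dynamic parameters are not). Below is a minimal standalone analog over a toy expression tree; Expr, Lit, Ref, and Call are hypothetical stand-ins for Calcite's rex classes, illustration only.

import java.util.Arrays;
import java.util.List;

public class ConstantFinderSketch {
  interface Expr {}
  // Literal value, e.g. 1 or 'abc'.
  static final class Lit implements Expr { final Object v; Lit(Object v) { this.v = v; } }
  // Input column reference, e.g. $0.
  static final class Ref implements Expr { final int i; Ref(int i) { this.i = i; } }
  // Function call with a deterministic flag and operands.
  static final class Call implements Expr {
    final boolean deterministic; final List<Expr> operands;
    Call(boolean deterministic, Expr... operands) {
      this.deterministic = deterministic; this.operands = Arrays.asList(operands);
    }
  }

  static boolean isConstant(Expr e) {
    if (e instanceof Lit) return true;   // literals are constant
    if (e instanceof Ref) return false;  // input refs never are
    Call c = (Call) e;                   // calls: deterministic + all-constant operands
    return c.deterministic && c.operands.stream().allMatch(ConstantFinderSketch::isConstant);
  }

  public static void main(String[] args) {
    System.out.println(isConstant(new Call(true, new Lit(1), new Lit(2)))); // true: 1 + 2
    System.out.println(isConstant(new Call(true, new Lit(1), new Ref(0)))); // false: 1 + $0
    System.out.println(isConstant(new Call(false, new Lit(1))));            // false: nondeterministic, rand()-like
  }
}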