diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties index 3e89071f7d..56645bd59c 100644 --- itests/src/test/resources/testconfiguration.properties +++ itests/src/test/resources/testconfiguration.properties @@ -16,7 +16,6 @@ disabled.query.files=cbo_rp_subq_in.q,\ sample4.q,\ root_dir_external_table.q,\ sort_acid.q,\ - topnkey_grouping_sets.q,\ input31.q @@ -25,12 +24,9 @@ disabled.query.files=cbo_rp_subq_in.q,\ minitez.query.files.shared=delete_orig_table.q,\ orc_merge12.q,\ orc_vectorization_ppd.q,\ - topnkey.q,\ - topnkey_order_null.q,\ update_orig_table.q,\ vector_join_part_col_char.q,\ - vector_non_string_partition.q,\ - vector_topnkey.q + vector_non_string_partition.q # NOTE: Add tests to minitez only if it is very # specific to tez and cannot be added to minillap. @@ -218,8 +214,6 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\ subquery_exists.q,\ subquery_in.q,\ temp_table.q,\ - topnkey.q,\ - topnkey_order_null.q,\ union2.q,\ union3.q,\ union4.q,\ @@ -336,7 +330,6 @@ minillaplocal.shared.query.files=alter_merge_2_orc.q,\ vector_reduce_groupby_duplicate_cols.q,\ vector_string_concat.q,\ vector_struct_in.q,\ - vector_topnkey.q,\ vector_udf_character_length.q,\ vector_udf_octet_length.q,\ vector_varchar_4.q,\ @@ -856,6 +849,14 @@ minillaplocal.query.files=\ tez_vector_dynpart_hashjoin_2.q,\ timestamp_4.q,\ timestamptz_5.q,\ + topnkey.q,\ + topnkey_grouping_sets.q,\ + topnkey_grouping_sets_functions.q,\ + topnkey_grouping_sets_order.q,\ + topnkey_order_null.q,\ + topnkey_windowing.q,\ + topnkey_windowing_order.q,\ + vector_topnkey.q,\ transitive_not_null.q,\ truncate_external_force.q,\ uber_reduce.q,\ diff --git ql/src/test/queries/clientpositive/topnkey_grouping_sets_functions.q ql/src/test/queries/clientpositive/topnkey_grouping_sets_functions.q index 60bd99a9d1..fec0e975f2 100644 --- ql/src/test/queries/clientpositive/topnkey_grouping_sets_functions.q +++ ql/src/test/queries/clientpositive/topnkey_grouping_sets_functions.q @@ -20,26 +20,26 @@ INSERT INTO t_test_grouping_sets VALUES set hive.optimize.topnkey=true; EXPLAIN -SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7; -SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7; +SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7; +SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7; set hive.optimize.topnkey=false; -SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7; +SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7; set hive.optimize.topnkey=true; EXPLAIN -SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7; -SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7; +SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7; +SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7; set hive.optimize.topnkey=false; -SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b
LIMIT 7; +SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7; set hive.optimize.topnkey=true; EXPLAIN -SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7; -SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7; +SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7; +SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7; set hive.optimize.topnkey=false; -SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7; +SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7; DROP TABLE IF EXISTS t_test_grouping_sets; diff --git ql/src/test/queries/clientpositive/topnkey_grouping_sets_order.q ql/src/test/queries/clientpositive/topnkey_grouping_sets_order.q index 9b0adbaecd..e00f35eb4d 100644 --- ql/src/test/queries/clientpositive/topnkey_grouping_sets_order.q +++ ql/src/test/queries/clientpositive/topnkey_grouping_sets_order.q @@ -20,19 +20,19 @@ INSERT INTO t_test_grouping_sets VALUES set hive.optimize.topnkey=true; EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7; -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7; set hive.optimize.topnkey=false; -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7; set hive.optimize.topnkey=true; EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7; -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7; set hive.optimize.topnkey=false; -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7; +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7; set hive.optimize.topnkey=true; EXPLAIN diff --git ql/src/test/results/clientpositive/llap/topnkey_grouping_sets_functions.q.out ql/src/test/results/clientpositive/llap/topnkey_grouping_sets_functions.q.out index 7e96b8e93f..7debc1ef99 100644 --- ql/src/test/results/clientpositive/llap/topnkey_grouping_sets_functions.q.out +++ ql/src/test/results/clientpositive/llap/topnkey_grouping_sets_functions.q.out @@ -44,12 +44,12 @@ POSTHOOK: Lineage: t_test_grouping_sets.a SCRIPT [] POSTHOOK: Lineage: t_test_grouping_sets.b SCRIPT [] POSTHOOK: Lineage: t_test_grouping_sets.c SCRIPT [] PREHOOK: query: EXPLAIN -SELECT a, b, sum(c) FROM 
t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### POSTHOOK: query: EXPLAIN -SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -102,9 +102,9 @@ STAGE PLANS: Statistics: Num rows: 26 Data size: 568 Basic stats: COMPLETE Column stats: COMPLETE pruneGroupingSetId: true Top N Key Operator - sort order: + - keys: _col1 (type: int) - null sort order: z + sort order: ++ + keys: _col1 (type: int), _col0 (type: int) + null sort order: zz Statistics: Num rows: 26 Data size: 568 Basic stats: COMPLETE Column stats: COMPLETE top n: 7 Select Operator @@ -112,17 +112,17 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 26 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: z - sort order: + + key expressions: _col1 (type: int), _col0 (type: int) + null sort order: zz + sort order: ++ Statistics: Num rows: 26 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int), _col2 (type: bigint) + value expressions: _col2 (type: bigint) Reducer 3 Execution mode: llap Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: bigint) + expressions: KEY.reducesinkkey1 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col0 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 26 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -142,11 +142,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +PREHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +POSTHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -155,13 +155,13 @@ NULL 1 6 5 2 3 6 2 1 NULL 2 4 -NULL 8 12 7 8 12 -PREHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +NULL 8 12 +PREHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +POSTHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) 
ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -170,15 +170,15 @@ NULL 1 6 5 2 3 6 2 1 NULL 2 4 -NULL 8 12 7 8 12 +NULL 8 12 PREHOOK: query: EXPLAIN -SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 +SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### POSTHOOK: query: EXPLAIN -SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 +SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -231,9 +231,9 @@ STAGE PLANS: Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE pruneGroupingSetId: true Top N Key Operator - sort order: + - keys: _col1 (type: int) - null sort order: z + sort order: ++ + keys: _col1 (type: int), _col0 (type: int) + null sort order: zz Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE top n: 7 Select Operator @@ -241,17 +241,17 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 26 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: z - sort order: + + key expressions: _col1 (type: int), _col0 (type: int) + null sort order: zz + sort order: ++ Statistics: Num rows: 26 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int), _col2 (type: int) + value expressions: _col2 (type: int) Reducer 3 Execution mode: llap Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: int) + expressions: KEY.reducesinkkey1 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 26 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -271,11 +271,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 +PREHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 +POSTHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -284,13 +284,13 @@ NULL 1 2 5 2 3 6 2 1 NULL 2 1 -NULL 8 4 7 8 4 -PREHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 +NULL 8 4 +PREHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### 
-POSTHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 +POSTHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -299,15 +299,15 @@ NULL 1 2 5 2 3 6 2 1 NULL 2 1 -NULL 8 4 7 8 4 +NULL 8 4 PREHOOK: query: EXPLAIN -SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### POSTHOOK: query: EXPLAIN -SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -360,9 +360,9 @@ STAGE PLANS: Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE pruneGroupingSetId: true Top N Key Operator - sort order: + - keys: _col1 (type: int) - null sort order: z + sort order: ++ + keys: _col1 (type: int), _col0 (type: int) + null sort order: zz Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE top n: 7 Select Operator @@ -370,17 +370,17 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 26 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: z - sort order: + + key expressions: _col1 (type: int), _col0 (type: int) + null sort order: zz + sort order: ++ Statistics: Num rows: 26 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int), _col2 (type: int) + value expressions: _col2 (type: int) Reducer 3 Execution mode: llap Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: int) + expressions: KEY.reducesinkkey1 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col0 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 26 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -400,11 +400,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +PREHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +POSTHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -413,13 +413,13 @@ NULL 1 2 5 2 3 6 2 1 NULL 2 3 -NULL 8 4 7 8 4 -PREHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +NULL 8 4 
+PREHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 +POSTHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -428,8 +428,8 @@ NULL 1 2 5 2 3 6 2 1 NULL 2 3 -NULL 8 4 7 8 4 +NULL 8 4 PREHOOK: query: DROP TABLE IF EXISTS t_test_grouping_sets PREHOOK: type: DROPTABLE PREHOOK: Input: default@t_test_grouping_sets diff --git ql/src/test/results/clientpositive/llap/topnkey_grouping_sets_order.q.out ql/src/test/results/clientpositive/llap/topnkey_grouping_sets_order.q.out index d6d76fb729..74952f4857 100644 --- ql/src/test/results/clientpositive/llap/topnkey_grouping_sets_order.q.out +++ ql/src/test/results/clientpositive/llap/topnkey_grouping_sets_order.q.out @@ -44,12 +44,12 @@ POSTHOOK: Lineage: t_test_grouping_sets.a SCRIPT [] POSTHOOK: Lineage: t_test_grouping_sets.b SCRIPT [] POSTHOOK: Lineage: t_test_grouping_sets.c SCRIPT [] PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -99,23 +99,22 @@ STAGE PLANS: Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE pruneGroupingSetId: true Top N Key Operator - sort order: + - keys: _col1 (type: int) - null sort order: a + sort order: ++ + keys: _col1 (type: int), _col0 (type: int) + null sort order: az Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE top n: 7 Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: a - sort order: + + key expressions: _col1 (type: int), _col0 (type: int) + null sort order: az + sort order: ++ Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int) Reducer 3 Execution mode: llap Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int) + expressions: KEY.reducesinkkey1 (type: int), KEY.reducesinkkey0 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -135,43 +134,43 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: 
default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### +5 NULL +6 NULL +7 NULL 10 NULL NULL NULL NULL NULL NULL NULL -5 NULL -6 NULL -NULL NULL -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### +5 NULL +6 NULL +7 NULL 10 NULL NULL NULL NULL NULL NULL NULL -5 NULL -6 NULL -NULL NULL PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 +SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -221,23 +220,22 @@ STAGE PLANS: Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE pruneGroupingSetId: true Top N Key Operator - sort order: + - keys: _col1 (type: int) - null sort order: z + sort order: ++ + keys: _col1 (type: int), _col0 (type: int) + null sort order: zz Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE top n: 7 Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: z - sort order: + + key expressions: _col1 (type: int), _col0 (type: int) + null sort order: zz + sort order: ++ Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int) Reducer 3 Execution mode: llap Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int) + expressions: KEY.reducesinkkey1 (type: int), KEY.reducesinkkey0 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 26 Data size: 88 Basic stats: COMPLETE Column stats: COMPLETE Limit @@ -257,11 +255,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7 PREHOOK: type: QUERY 
PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -270,13 +268,13 @@ NULL 1 5 2 6 2 NULL 2 -NULL 8 7 8 -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 +NULL 8 +PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7 PREHOOK: type: QUERY PREHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 +POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST, a LIMIT 7 POSTHOOK: type: QUERY POSTHOOK: Input: default@t_test_grouping_sets #### A masked pattern was here #### @@ -285,8 +283,8 @@ NULL 1 5 2 6 2 NULL 2 -NULL 8 7 8 +NULL 8 PREHOOK: query: EXPLAIN SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC, b ASC LIMIT 7 PREHOOK: type: QUERY diff --git ql/src/test/results/clientpositive/llap/topnkey_windowing.q.out ql/src/test/results/clientpositive/llap/topnkey_windowing.q.out index 80aa189ef8..64772ea10d 100644 --- ql/src/test/results/clientpositive/llap/topnkey_windowing.q.out +++ ql/src/test/results/clientpositive/llap/topnkey_windowing.q.out @@ -249,7 +249,7 @@ STAGE PLANS: Map-reduce partition columns: tw_code (type: string) Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 - Execution mode: llap + Execution mode: vectorized, llap LLAP IO: no inputs Reducer 2 Execution mode: vectorized, llap @@ -616,7 +616,7 @@ STAGE PLANS: TopN Hash Memory Usage: 0.1 value expressions: tw_code (type: string) auto parallelism: true - Execution mode: llap + Execution mode: vectorized, llap LLAP IO: no inputs Path -> Alias: #### A masked pattern was here #### @@ -830,7 +830,7 @@ STAGE PLANS: Map-reduce partition columns: tw_code (type: string) Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE TopN Hash Memory Usage: 0.1 - Execution mode: llap + Execution mode: vectorized, llap LLAP IO: no inputs Reducer 2 Execution mode: vectorized, llap diff --git ql/src/test/results/clientpositive/tez/topnkey.q.out ql/src/test/results/clientpositive/tez/topnkey.q.out deleted file mode 100644 index 5ba6ac4716..0000000000 --- ql/src/test/results/clientpositive/tez/topnkey.q.out +++ /dev/null @@ -1,399 +0,0 @@ -PREHOOK: query: EXPLAIN -SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN -SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -Plan optimized by CBO. 
- -Vertex dependency in root stage -Reducer 2 <- Map 1 (SIMPLE_EDGE) -Reducer 3 <- Reducer 2 (SIMPLE_EDGE) - -Stage-0 - Fetch Operator - limit:5 - Stage-1 - Reducer 3 - File Output Operator [FS_10] - Limit [LIM_9] (rows=5 width=95) - Number of rows:5 - Select Operator [SEL_8] (rows=250 width=95) - Output:["_col0","_col1"] - <-Reducer 2 [SIMPLE_EDGE] - SHUFFLE [RS_7] - Group By Operator [GBY_5] (rows=250 width=95) - Output:["_col0","_col1"],aggregations:["sum(VALUE._col0)"],keys:KEY._col0 - <-Map 1 [SIMPLE_EDGE] - SHUFFLE [RS_4] - PartitionCols:_col0 - Group By Operator [GBY_3] (rows=250 width=95) - Output:["_col0","_col1"],aggregations:["sum(_col1)"],keys:_col0 - Select Operator [SEL_1] (rows=500 width=178) - Output:["_col0","_col1"] - Top N Key Operator [TNK_14] (rows=500 width=178) - keys:key,top n:5 - TableScan [TS_0] (rows=500 width=178) - default@src,src,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"] - -PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 0 -10 10 -100 200 -103 206 -104 208 -PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 0 -10 10 -100 200 -103 206 -104 208 -PREHOOK: query: EXPLAIN -SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN -SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -Plan optimized by CBO. 
- -Vertex dependency in root stage -Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) -Reducer 3 <- Reducer 2 (SIMPLE_EDGE) -Reducer 4 <- Reducer 3 (SIMPLE_EDGE) - -Stage-0 - Fetch Operator - limit:5 - Stage-1 - Reducer 4 - File Output Operator [FS_16] - Limit [LIM_15] (rows=5 width=178) - Number of rows:5 - Select Operator [SEL_14] (rows=395 width=178) - Output:["_col0","_col1"] - <-Reducer 3 [SIMPLE_EDGE] - SHUFFLE [RS_13] - Group By Operator [GBY_11] (rows=395 width=178) - Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 - <-Reducer 2 [SIMPLE_EDGE] - SHUFFLE [RS_10] - PartitionCols:_col0, _col1 - Group By Operator [GBY_9] (rows=395 width=178) - Output:["_col0","_col1"],keys:_col0, _col2 - Top N Key Operator [TNK_25] (rows=791 width=178) - keys:_col0, _col2,top n:5 - Merge Join Operator [MERGEJOIN_33] (rows=791 width=178) - Conds:RS_5._col0=RS_6._col0(Left Outer),Output:["_col0","_col2"] - <-Map 1 [SIMPLE_EDGE] - SHUFFLE [RS_5] - PartitionCols:_col0 - Select Operator [SEL_1] (rows=500 width=87) - Output:["_col0"] - Top N Key Operator [TNK_23] (rows=500 width=87) - keys:key,top n:5 - TableScan [TS_0] (rows=500 width=87) - default@src,src1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] - <-Map 5 [SIMPLE_EDGE] - SHUFFLE [RS_6] - PartitionCols:_col0 - Select Operator [SEL_4] (rows=500 width=178) - Output:["_col0","_col1"] - Filter Operator [FIL_18] (rows=500 width=178) - predicate:key is not null - TableScan [TS_2] (rows=500 width=178) - default@src,src2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"] - -PREHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -PREHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -PREHOOK: query: EXPLAIN -SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN -SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -Plan optimized by CBO. 
- -Vertex dependency in root stage -Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 5 (SIMPLE_EDGE) -Reducer 3 <- Reducer 2 (SIMPLE_EDGE) -Reducer 4 <- Reducer 3 (SIMPLE_EDGE) - -Stage-0 - Fetch Operator - limit:5 - Stage-1 - Reducer 4 - File Output Operator [FS_16] - Limit [LIM_15] (rows=5 width=178) - Number of rows:5 - Select Operator [SEL_14] (rows=395 width=178) - Output:["_col0","_col1"] - <-Reducer 3 [SIMPLE_EDGE] - SHUFFLE [RS_13] - Group By Operator [GBY_11] (rows=395 width=178) - Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 - <-Reducer 2 [SIMPLE_EDGE] - SHUFFLE [RS_10] - PartitionCols:_col0, _col1 - Group By Operator [GBY_9] (rows=395 width=178) - Output:["_col0","_col1"],keys:_col0, _col2 - Top N Key Operator [TNK_24] (rows=791 width=178) - keys:_col0, _col2,top n:5 - Merge Join Operator [MERGEJOIN_30] (rows=791 width=178) - Conds:RS_5._col0=RS_6._col0(Left Outer),Output:["_col0","_col2"] - <-Map 1 [SIMPLE_EDGE] - SHUFFLE [RS_5] - PartitionCols:_col0 - Select Operator [SEL_1] (rows=500 width=87) - Output:["_col0"] - TableScan [TS_0] (rows=500 width=87) - default@src,src1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] - <-Map 5 [SIMPLE_EDGE] - SHUFFLE [RS_6] - PartitionCols:_col0 - Select Operator [SEL_4] (rows=500 width=178) - Output:["_col0","_col1"] - Filter Operator [FIL_18] (rows=500 width=178) - predicate:key is not null - TableScan [TS_2] (rows=500 width=178) - default@src,src2,Tbl:COMPLETE,Col:COMPLETE,Output:["key","value"] - -PREHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -PREHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -PREHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test -POSTHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test -PREHOOK: query: INSERT INTO t_test VALUES -(5, 2, 3), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test -POSTHOOK: query: INSERT INTO t_test VALUES -(5, 2, 3), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_test -POSTHOOK: Lineage: t_test.a SCRIPT [] -POSTHOOK: Lineage: t_test.b SCRIPT [] 
-POSTHOOK: Lineage: t_test.c SCRIPT [] -PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -Plan optimized by CBO. - -Vertex dependency in root stage -Reducer 2 <- Map 1 (SIMPLE_EDGE) - -Stage-0 - Fetch Operator - limit:3 - Stage-1 - Reducer 2 - File Output Operator [FS_5] - Limit [LIM_4] (rows=3 width=8) - Number of rows:3 - Select Operator [SEL_3] (rows=8 width=8) - Output:["_col0","_col1"] - <-Map 1 [SIMPLE_EDGE] - SHUFFLE [RS_2] - Select Operator [SEL_1] (rows=8 width=8) - Output:["_col0","_col1"] - Top N Key Operator [TNK_6] (rows=8 width=8) - keys:a, b,top n:3 - TableScan [TS_0] (rows=8 width=8) - default@t_test,t_test,Tbl:COMPLETE,Col:COMPLETE,Output:["a","b"] - -PREHOOK: query: SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 1 -5 1 -5 1 -PREHOOK: query: SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 1 -5 1 -5 1 -PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -Plan optimized by CBO. 
- -Vertex dependency in root stage -Reducer 2 <- Map 1 (SIMPLE_EDGE) -Reducer 3 <- Reducer 2 (SIMPLE_EDGE) - -Stage-0 - Fetch Operator - limit:3 - Stage-1 - Reducer 3 - File Output Operator [FS_9] - Limit [LIM_8] (rows=3 width=8) - Number of rows:3 - Select Operator [SEL_7] (rows=4 width=8) - Output:["_col0","_col1"] - <-Reducer 2 [SIMPLE_EDGE] - SHUFFLE [RS_6] - Group By Operator [GBY_4] (rows=4 width=8) - Output:["_col0","_col1"],keys:KEY._col0, KEY._col1 - <-Map 1 [SIMPLE_EDGE] - SHUFFLE [RS_3] - PartitionCols:_col0, _col1 - Group By Operator [GBY_2] (rows=4 width=8) - Output:["_col0","_col1"],keys:a, b - Select Operator [SEL_1] (rows=8 width=8) - Output:["a","b"] - Top N Key Operator [TNK_13] (rows=8 width=8) - keys:a, b,top n:3 - TableScan [TS_0] (rows=8 width=8) - default@t_test,t_test,Tbl:COMPLETE,Col:COMPLETE,Output:["a","b"] - -PREHOOK: query: SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 1 -5 2 -6 2 -PREHOOK: query: SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 1 -5 2 -6 2 -PREHOOK: query: DROP TABLE t_test -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test -PREHOOK: Output: default@t_test -POSTHOOK: query: DROP TABLE t_test -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_test -POSTHOOK: Output: default@t_test diff --git ql/src/test/results/clientpositive/tez/topnkey_order_null.q.out ql/src/test/results/clientpositive/tez/topnkey_order_null.q.out deleted file mode 100644 index 656c97d033..0000000000 --- ql/src/test/results/clientpositive/tez/topnkey_order_null.q.out +++ /dev/null @@ -1,282 +0,0 @@ -PREHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test -POSTHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test -PREHOOK: query: INSERT INTO t_test VALUES -(NULL, NULL, NULL), -(5, 2, 3), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test -POSTHOOK: query: INSERT INTO t_test VALUES -(NULL, NULL, NULL), -(5, 2, 3), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_test -POSTHOOK: Lineage: t_test.a SCRIPT [] -POSTHOOK: Lineage: t_test.b SCRIPT [] -POSTHOOK: Lineage: t_test.c SCRIPT [] -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: 
hdfs://### HDFS PATH ### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -NULL 0 -5 4 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -NULL 0 -5 4 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -NULL 0 -5 4 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -NULL 0 -5 4 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a 
ORDER BY a ASC NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -5 4 -6 1 -PREHOOK: query: DROP TABLE IF EXISTS t_test -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test -PREHOOK: Output: default@t_test -POSTHOOK: query: DROP TABLE IF EXISTS t_test -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_test -POSTHOOK: Output: default@t_test -PREHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test -POSTHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test -PREHOOK: query: INSERT INTO t_test VALUES -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(NULL, NULL, NULL), -(5, 2, 3), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test -POSTHOOK: query: INSERT INTO t_test VALUES -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(NULL, NULL, NULL), -(5, 2, 3), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_test -POSTHOOK: Lineage: t_test.a SCRIPT [] -POSTHOOK: Lineage: t_test.b SCRIPT [] -POSTHOOK: Lineage: t_test.c SCRIPT [] -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -7 3 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -7 3 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -NULL 0 -7 3 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY 
-POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -NULL 0 -7 3 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -7 3 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -7 3 -6 1 -PREHOOK: query: DROP TABLE IF EXISTS t_test -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test -PREHOOK: Output: default@t_test -POSTHOOK: query: DROP TABLE IF EXISTS t_test -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_test -POSTHOOK: Output: default@t_test diff --git ql/src/test/results/clientpositive/tez/vector_topnkey.q.out ql/src/test/results/clientpositive/tez/vector_topnkey.q.out deleted file mode 100644 index 95812a91aa..0000000000 --- ql/src/test/results/clientpositive/tez/vector_topnkey.q.out +++ /dev/null @@ -1,308 +0,0 @@ -PREHOOK: query: CREATE TABLE t_test( - cint1 int, - cint2 int, - cdouble double, - cvarchar varchar(50), - cdecimal1 decimal(10,2), - cdecimal2 decimal(38,5) -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test -POSTHOOK: query: CREATE TABLE t_test( - cint1 int, - cint2 int, - cdouble double, - cvarchar varchar(50), - cdecimal1 decimal(10,2), - cdecimal2 decimal(38,5) -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test -PREHOOK: query: INSERT INTO t_test VALUES -(NULL, NULL, NULL, NULL, NULL, NULL), -(8, 9, 2.0, 'one', 2.0, 2.0), (8, 9, 2.0, 'one', 2.0, 2.0), -(4, 2, 3.3, 'two', 3.3, 3.3), -(NULL, NULL, NULL, NULL, NULL, NULL), -(NULL, NULL, NULL, NULL, NULL, NULL), -(6, 2, 1.8, 'three', 1.8, 1.8), -(7, 8, 4.5, 'four', 4.5, 4.5), (7, 8, 4.5, 'four', 4.5, 4.5), (7, 8, 4.5, 'four', 4.5, 4.5), -(4, 1, 2.0, 'five', 2.0, 2.0), (4, 1, 2.0, 'five', 2.0, 2.0), (4, 1, 2.0, 'five', 2.0, 2.0), -(NULL, NULL, NULL, NULL, NULL, NULL) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test -POSTHOOK: query: INSERT INTO t_test VALUES -(NULL, NULL, NULL, NULL, NULL, NULL), -(8, 9, 2.0, 'one', 2.0, 2.0), (8, 9, 2.0, 'one', 2.0, 2.0), -(4, 2, 3.3, 'two', 3.3, 3.3), -(NULL, NULL, NULL, NULL, NULL, NULL), -(NULL, NULL, NULL, NULL, NULL, NULL), -(6, 2, 1.8, 'three', 1.8, 1.8), -(7, 8, 4.5, 'four', 4.5, 4.5), (7, 8, 4.5, 'four', 4.5, 4.5), (7, 8, 4.5, 'four', 4.5, 4.5), -(4, 1, 2.0, 'five', 2.0, 2.0), (4, 1, 2.0, 'five', 2.0, 2.0), (4, 1, 2.0, 'five', 2.0, 2.0), -(NULL, NULL, NULL, NULL, NULL, NULL) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_test -POSTHOOK: Lineage: t_test.cdecimal1 SCRIPT [] -POSTHOOK: Lineage: t_test.cdecimal2 SCRIPT [] -POSTHOOK: Lineage: t_test.cdouble SCRIPT [] -POSTHOOK: Lineage: t_test.cint1 SCRIPT [] -POSTHOOK: Lineage: t_test.cint2 SCRIPT [] -POSTHOOK: Lineage: t_test.cvarchar SCRIPT [] -PREHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT cint1 FROM t_test GROUP BY cint1 ORDER BY 
cint1 LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL -SELECT cint1 FROM t_test GROUP BY cint1 ORDER BY cint1 LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -PLAN VECTORIZATION: - enabled: true - enabledConditionsMet: [hive.vectorized.execution.enabled IS true] - -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: t_test - Statistics: Num rows: 14 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE - TableScan Vectorization: - native: true - vectorizationSchemaColumns: [0:cint1:int, 1:cint2:int, 2:cdouble:double, 3:cvarchar:varchar(50), 4:cdecimal1:decimal(10,2)/DECIMAL_64, 5:cdecimal2:decimal(38,5), 6:ROW__ID:struct] - Top N Key Operator - sort order: + - keys: cint1 (type: int) - null sort order: z - Statistics: Num rows: 14 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE - top n: 3 - Top N Key Vectorization: - className: VectorTopNKeyOperator - keyExpressions: col 0:int - native: true - Select Operator - expressions: cint1 (type: int) - outputColumnNames: cint1 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0] - Statistics: Num rows: 14 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - Group By Vectorization: - className: VectorGroupByOperator - groupByMode: HASH - keyExpressions: col 0:int - native: false - vectorProcessingMode: HASH - projectedOutputColumnNums: [] - keys: cint1 (type: int) - minReductionHashAggr: 0.64285713 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Reduce Sink Vectorization: - className: VectorReduceSinkLongOperator - keyColumns: 0:int - native: true - nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 5 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Execution mode: vectorized - Map Vectorization: - enabled: true - enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true - inputFormatFeatureSupport: [DECIMAL_64] - featureSupportInUse: [DECIMAL_64] - inputFileFormats: org.apache.hadoop.mapred.TextInputFormat - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 6 - includeColumns: [0] - dataColumns: cint1:int, cint2:int, cdouble:double, cvarchar:varchar(50), cdecimal1:decimal(10,2)/DECIMAL_64, cdecimal2:decimal(38,5) - partitionColumnCount: 0 - scratchColumnTypeNames: [] - Reducer 2 - Execution mode: vectorized - Reduce Vectorization: - enabled: true - enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true - reduceColumnNullOrder: z - reduceColumnSortOrder: + - allNative: false - 
usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 1 - dataColumns: KEY._col0:int - partitionColumnCount: 0 - scratchColumnTypeNames: [] - Reduce Operator Tree: - Group By Operator - Group By Vectorization: - className: VectorGroupByOperator - groupByMode: MERGEPARTIAL - keyExpressions: col 0:int - native: false - vectorProcessingMode: MERGE_PARTIAL - projectedOutputColumnNums: [] - keys: KEY._col0 (type: int) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Reduce Sink Vectorization: - className: VectorReduceSinkObjectHashOperator - keyColumns: 0:int - native: true - nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true - Statistics: Num rows: 5 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reducer 3 - Execution mode: vectorized - Reduce Vectorization: - enabled: true - enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true - reduceColumnNullOrder: z - reduceColumnSortOrder: + - allNative: false - usesVectorUDFAdaptor: false - vectorized: true - rowBatchContext: - dataColumnCount: 1 - dataColumns: KEY.reducesinkkey0:int - partitionColumnCount: 0 - scratchColumnTypeNames: [] - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0 - Select Vectorization: - className: VectorSelectOperator - native: true - projectedOutputColumnNums: [0] - Statistics: Num rows: 5 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 3 - Limit Vectorization: - className: VectorLimitOperator - native: true - Statistics: Num rows: 3 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - File Sink Vectorization: - className: VectorFileSinkOperator - native: false - Statistics: Num rows: 3 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 3 - Processor Tree: - ListSink - -PREHOOK: query: SELECT cint1 FROM t_test GROUP BY cint1 ORDER BY cint1 LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT cint1 FROM t_test GROUP BY cint1 ORDER BY cint1 LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -4 -6 -7 -PREHOOK: query: SELECT cint1, cint2 FROM t_test GROUP BY cint1, cint2 ORDER BY cint1, cint2 LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT cint1, cint2 FROM t_test GROUP BY cint1, cint2 ORDER BY cint1, cint2 LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -4 1 -4 2 -6 2 -PREHOOK: query: SELECT cint1, cint2 FROM t_test GROUP BY cint1, cint2 ORDER BY cint1 DESC, cint2 LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: 
Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT cint1, cint2 FROM t_test GROUP BY cint1, cint2 ORDER BY cint1 DESC, cint2 LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -8 9 -7 8 -6 2 -PREHOOK: query: SELECT cint1, cdouble FROM t_test GROUP BY cint1, cdouble ORDER BY cint1, cdouble LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT cint1, cdouble FROM t_test GROUP BY cint1, cdouble ORDER BY cint1, cdouble LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -4 2.0 -4 3.3 -6 1.8 -PREHOOK: query: SELECT cvarchar, cdouble FROM t_test GROUP BY cvarchar, cdouble ORDER BY cvarchar, cdouble LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT cvarchar, cdouble FROM t_test GROUP BY cvarchar, cdouble ORDER BY cvarchar, cdouble LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -five 2.0 -four 4.5 -one 2.0 -PREHOOK: query: SELECT cdecimal1, cdecimal2 FROM t_test GROUP BY cdecimal1, cdecimal2 ORDER BY cdecimal1, cdecimal2 LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -PREHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: query: SELECT cdecimal1, cdecimal2 FROM t_test GROUP BY cdecimal1, cdecimal2 ORDER BY cdecimal1, cdecimal2 LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -POSTHOOK: Output: hdfs://### HDFS PATH ### -1.80 1.80000 -2.00 2.00000 -3.30 3.30000 -PREHOOK: query: DROP TABLE t_test -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test -PREHOOK: Output: default@t_test -POSTHOOK: query: DROP TABLE t_test -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_test -POSTHOOK: Output: default@t_test diff --git ql/src/test/results/clientpositive/topnkey.q.out ql/src/test/results/clientpositive/topnkey.q.out deleted file mode 100644 index 9cde4e12bb..0000000000 --- ql/src/test/results/clientpositive/topnkey.q.out +++ /dev/null @@ -1,650 +0,0 @@ -PREHOOK: query: EXPLAIN -SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string), UDFToInteger(substr(value, 5)) (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(_col1) - keys: _col0 (type: string) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: 
COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col1 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: z - sort order: + - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col1 (type: bigint) - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 250 Data size: 23750 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 5 - Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 5 Data size: 475 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 5 - Processor Tree: - ListSink - -PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0 -10 10 -100 200 -103 206 -104 208 -PREHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT key, SUM(CAST(SUBSTR(value,5) AS INT)) FROM src GROUP BY key ORDER BY key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 0 -10 10 -100 200 -103 206 -104 208 -PREHOOK: query: EXPLAIN -SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-3 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-3 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src1 - Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 43500 Basic 
stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE - TableScan - alias: src2 - filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Reduce Operator Tree: - Join Operator - condition map: - Left Outer Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col2 - Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: _col0 (type: string), _col2 (type: string) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - null sort order: zz - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: z - sort order: + - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col1 (type: string) - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 5 - Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE - 
table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 5 - Processor Tree: - ListSink - -PREHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -PREHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -PREHOOK: query: EXPLAIN -SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-3 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-3 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: src1 - Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string) - outputColumnNames: _col0 - Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE - TableScan - alias: src2 - filterExpr: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: string) - Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string) - Reduce Operator Tree: - 
Join Operator - condition map: - Left Outer Join 0 to 1 - keys: - 0 _col0 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col2 - Statistics: Num rows: 791 Data size: 140798 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: _col0 (type: string), _col2 (type: string) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: string), _col1 (type: string) - null sort order: az - sort order: ++ - Map-reduce partition columns: _col0 (type: string), _col1 (type: string) - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: string), KEY._col1 (type: string) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-3 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: string) - null sort order: a - sort order: + - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col1 (type: string) - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 395 Data size: 70310 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 5 - Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 5 Data size: 890 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 5 - Processor Tree: - ListSink - -PREHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### A masked pattern was here #### -POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -PREHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -PREHOOK: type: QUERY -PREHOOK: Input: default@src -#### 
A masked pattern was here #### -POSTHOOK: query: SELECT src1.key, src2.value FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key) GROUP BY src1.key, src2.value ORDER BY src1.key NULLS FIRST LIMIT 5 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@src -#### A masked pattern was here #### -0 val_0 -10 val_10 -100 val_100 -103 val_103 -104 val_104 -PREHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test -POSTHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test -PREHOOK: query: INSERT INTO t_test VALUES -(5, 2, 3), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test -POSTHOOK: query: INSERT INTO t_test VALUES -(5, 2, 3), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_test -POSTHOOK: Lineage: t_test.a SCRIPT [] -POSTHOOK: Lineage: t_test.b SCRIPT [] -POSTHOOK: Lineage: t_test.c SCRIPT [] -PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test - Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int) - null sort order: zz - sort order: ++ - Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 3 - Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 3 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 1 -5 1 -5 1 -PREHOOK: query: SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: 
SELECT a, b FROM t_test ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 1 -5 1 -5 1 -PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test - Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int) - outputColumnNames: a, b - Statistics: Num rows: 8 Data size: 64 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: a (type: int), b (type: int) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int) - null sort order: zz - sort order: ++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: int) - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int), KEY._col1 (type: int) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int) - null sort order: zz - sort order: ++ - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 3 - Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 3 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 1 -5 2 -6 2 -PREHOOK: query: SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test 
-#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test GROUP BY a, b ORDER BY a, b LIMIT 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 1 -5 2 -6 2 -PREHOOK: query: DROP TABLE t_test -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test -PREHOOK: Output: default@t_test -POSTHOOK: query: DROP TABLE t_test -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_test -POSTHOOK: Output: default@t_test diff --git ql/src/test/results/clientpositive/topnkey_grouping_sets_functions.q.out ql/src/test/results/clientpositive/topnkey_grouping_sets_functions.q.out deleted file mode 100644 index 6d56d94309..0000000000 --- ql/src/test/results/clientpositive/topnkey_grouping_sets_functions.q.out +++ /dev/null @@ -1,419 +0,0 @@ -PREHOOK: query: CREATE TABLE t_test_grouping_sets( - a int, - b int, - c int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test_grouping_sets -POSTHOOK: query: CREATE TABLE t_test_grouping_sets( - a int, - b int, - c int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test_grouping_sets -PREHOOK: query: INSERT INTO t_test_grouping_sets VALUES -(NULL, NULL, NULL), -(5, 2, 3), -(10, 11, 12), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test_grouping_sets -POSTHOOK: query: INSERT INTO t_test_grouping_sets VALUES -(NULL, NULL, NULL), -(5, 2, 3), -(10, 11, 12), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_test_grouping_sets -POSTHOOK: Lineage: t_test_grouping_sets.a SCRIPT [] -POSTHOOK: Lineage: t_test_grouping_sets.b SCRIPT [] -POSTHOOK: Lineage: t_test_grouping_sets.c SCRIPT [] -PREHOOK: query: EXPLAIN -SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test_grouping_sets - Statistics: Num rows: 13 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int), c (type: int) - outputColumnNames: a, b, c - Statistics: Num rows: 13 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: sum(c) - keys: a (type: int), b (type: int), 0L (type: bigint) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 26 Data size: 568 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - null sort order: zzz - sort order: +++ - Map-reduce partition columns: _col0 (type: int), _col1 
(type: int), _col2 (type: bigint) - Statistics: Num rows: 26 Data size: 568 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col3 (type: bigint) - Reduce Operator Tree: - Group By Operator - aggregations: sum(VALUE._col0) - keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) - mode: mergepartial - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 26 Data size: 568 Basic stats: COMPLETE Column stats: COMPLETE - pruneGroupingSetId: true - Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col3 (type: bigint) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: z - sort order: + - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int), _col2 (type: bigint) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: bigint) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 7 - Statistics: Num rows: 7 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 104 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 7 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL 1 6 -5 1 6 -6 2 1 -NULL 2 4 -5 2 3 -7 8 12 -NULL 8 12 -PREHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, sum(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL 1 6 -5 1 6 -6 2 1 -NULL 2 4 -5 2 3 -7 8 12 -NULL 8 12 -PREHOOK: query: EXPLAIN -SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) 
ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test_grouping_sets - Statistics: Num rows: 13 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int), c (type: int) - outputColumnNames: a, b, c - Statistics: Num rows: 13 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: min(c) - keys: a (type: int), b (type: int), 0L (type: bigint) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - null sort order: zzz - sort order: +++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col3 (type: int) - Reduce Operator Tree: - Group By Operator - aggregations: min(VALUE._col0) - keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) - mode: mergepartial - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE - pruneGroupingSetId: true - Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col3 (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: z - sort order: + - Statistics: Num rows: 26 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int), _col2 (type: int) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 7 - Statistics: Num rows: 7 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 7 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), 
(b), ()) ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL 1 2 -5 1 2 -6 2 1 -NULL 2 1 -5 2 3 -7 8 4 -NULL 8 4 -PREHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, min(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((b,a), (a), (b), ()) ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL 1 2 -5 1 2 -6 2 1 -NULL 2 1 -5 2 3 -7 8 4 -NULL 8 4 -PREHOOK: query: EXPLAIN -SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test_grouping_sets - Statistics: Num rows: 13 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int), c (type: int) - outputColumnNames: a, b, c - Statistics: Num rows: 13 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: max(c) - keys: a (type: int), b (type: int), 0L (type: bigint) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - null sort order: zzz - sort order: +++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col3 (type: int) - Reduce Operator Tree: - Group By Operator - aggregations: max(VALUE._col0) - keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) - mode: mergepartial - outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 26 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE - pruneGroupingSetId: true - Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col3 (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: z - sort order: + - Statistics: Num rows: 26 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int), _col2 (type: int) - Reduce Operator Tree: - Select Operator 
- expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int), VALUE._col1 (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 256 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 7 - Statistics: Num rows: 7 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 7 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL 1 2 -5 1 2 -6 2 1 -NULL 2 3 -5 2 3 -7 8 4 -NULL 8 4 -PREHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b, max(c) FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL 1 2 -5 1 2 -6 2 1 -NULL 2 3 -5 2 3 -7 8 4 -NULL 8 4 -PREHOOK: query: DROP TABLE IF EXISTS t_test_grouping_sets -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test_grouping_sets -PREHOOK: Output: default@t_test_grouping_sets -POSTHOOK: query: DROP TABLE IF EXISTS t_test_grouping_sets -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_test_grouping_sets -POSTHOOK: Output: default@t_test_grouping_sets diff --git ql/src/test/results/clientpositive/topnkey_grouping_sets_order.q.out ql/src/test/results/clientpositive/topnkey_grouping_sets_order.q.out deleted file mode 100644 index 7089fcdabf..0000000000 --- ql/src/test/results/clientpositive/topnkey_grouping_sets_order.q.out +++ /dev/null @@ -1,513 +0,0 @@ -PREHOOK: query: CREATE TABLE t_test_grouping_sets( - a int, - b int, - c int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test_grouping_sets -POSTHOOK: query: CREATE TABLE t_test_grouping_sets( - a int, - b int, - c int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test_grouping_sets -PREHOOK: query: INSERT INTO t_test_grouping_sets VALUES -(NULL, NULL, NULL), -(5, 2, 3), -(10, 11, 12), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test_grouping_sets -POSTHOOK: query: INSERT INTO t_test_grouping_sets VALUES -(NULL, NULL, NULL), -(5, 2, 3), -(10, 11, 12), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table 
-POSTHOOK: Output: default@t_test_grouping_sets -POSTHOOK: Lineage: t_test_grouping_sets.a SCRIPT [] -POSTHOOK: Lineage: t_test_grouping_sets.b SCRIPT [] -POSTHOOK: Lineage: t_test_grouping_sets.c SCRIPT [] -PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test_grouping_sets - Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int) - outputColumnNames: a, b - Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: a (type: int), b (type: int), 0L (type: bigint) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - null sort order: zzz - sort order: +++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - pruneGroupingSetId: true - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: a - sort order: + - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 152 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 7 - Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 7 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 -PREHOOK: type: QUERY 
-PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -7 NULL -NULL NULL -6 NULL -10 NULL -NULL NULL -NULL NULL -5 NULL -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS FIRST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -7 NULL -NULL NULL -6 NULL -10 NULL -NULL NULL -NULL NULL -5 NULL -PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test_grouping_sets - Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int) - outputColumnNames: a, b - Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: a (type: int), b (type: int), 0L (type: bigint) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - null sort order: zzz - sort order: +++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - pruneGroupingSetId: true - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col1 (type: int) - null sort order: z - sort order: + - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - value expressions: _col0 (type: int) - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: int), KEY.reducesinkkey0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 
152 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 7 - Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 7 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL 1 -5 1 -6 2 -NULL 2 -5 2 -7 8 -NULL 8 -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY b NULLS LAST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL 1 -5 1 -6 2 -NULL 2 -5 2 -7 8 -NULL 8 -PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC, b ASC LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC, b ASC LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test_grouping_sets - Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int) - outputColumnNames: a, b - Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: a (type: int), b (type: int), 0L (type: bigint) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - null sort order: zzz - sort order: -++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - pruneGroupingSetId: true - File Output Operator - compressed: false - 
table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int) - null sort order: zz - sort order: -+ - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 152 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 7 - Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 7 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC, b ASC LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC, b ASC LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -10 11 -10 NULL -7 8 -7 NULL -6 2 -6 NULL -5 1 -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC, b ASC LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC, b ASC LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -10 11 -10 NULL -7 8 -7 NULL -6 2 -6 NULL -5 1 -PREHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC NULLS FIRST, b ASC NULLS FIRST LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC NULLS FIRST, b ASC NULLS FIRST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: t_test_grouping_sets - Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: int) - outputColumnNames: a, b - Statistics: Num rows: 13 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: a (type: int), b (type: int), 0L (type: bigint) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1, _col2 - 
Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - null sort order: aaz - sort order: -++ - Map-reduce partition columns: _col0 (type: int), _col1 (type: int), _col2 (type: bigint) - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Group By Operator - keys: KEY._col0 (type: int), KEY._col1 (type: int), KEY._col2 (type: bigint) - mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - pruneGroupingSetId: true - File Output Operator - compressed: false - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - - Stage: Stage-2 - Map Reduce - Map Operator Tree: - TableScan - Reduce Output Operator - key expressions: _col0 (type: int), _col1 (type: int) - null sort order: aa - sort order: -+ - Statistics: Num rows: 26 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 152 Basic stats: COMPLETE Column stats: COMPLETE - Limit - Number of rows: 7 - Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: 7 - Processor Tree: - ListSink - -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC NULLS FIRST, b ASC NULLS FIRST LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC NULLS FIRST, b ASC NULLS FIRST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL NULL -NULL NULL -NULL NULL -NULL NULL -NULL 1 -NULL 2 -NULL 8 -PREHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC NULLS FIRST, b ASC NULLS FIRST LIMIT 7 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, b FROM t_test_grouping_sets GROUP BY a,b GROUPING SETS ((a,b), (a), (b), ()) ORDER BY a DESC NULLS FIRST, b ASC NULLS FIRST LIMIT 7 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test_grouping_sets -#### A masked pattern was here #### -NULL NULL -NULL NULL -NULL NULL -NULL NULL -NULL 1 -NULL 2 -NULL 8 -PREHOOK: query: DROP TABLE IF EXISTS t_test_grouping_sets -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test_grouping_sets -PREHOOK: Output: default@t_test_grouping_sets -POSTHOOK: query: DROP TABLE IF EXISTS t_test_grouping_sets -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: 
default@t_test_grouping_sets -POSTHOOK: Output: default@t_test_grouping_sets diff --git ql/src/test/results/clientpositive/topnkey_order_null.q.out ql/src/test/results/clientpositive/topnkey_order_null.q.out deleted file mode 100644 index fef6b702d8..0000000000 --- ql/src/test/results/clientpositive/topnkey_order_null.q.out +++ /dev/null @@ -1,282 +0,0 @@ -PREHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test -POSTHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test -PREHOOK: query: INSERT INTO t_test VALUES -(NULL, NULL, NULL), -(5, 2, 3), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test -POSTHOOK: query: INSERT INTO t_test VALUES -(NULL, NULL, NULL), -(5, 2, 3), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@t_test -POSTHOOK: Lineage: t_test.a SCRIPT [] -POSTHOOK: Lineage: t_test.b SCRIPT [] -POSTHOOK: Lineage: t_test.c SCRIPT [] -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -NULL 0 -5 4 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -NULL 0 -5 4 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a NULLS LAST LIMIT 2 
-POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -NULL 0 -5 4 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -NULL 0 -5 4 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 4 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a ASC NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -5 4 -6 1 -PREHOOK: query: DROP TABLE IF EXISTS t_test -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test -PREHOOK: Output: default@t_test -POSTHOOK: query: DROP TABLE IF EXISTS t_test -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_test -POSTHOOK: Output: default@t_test -PREHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@t_test -POSTHOOK: query: CREATE TABLE t_test( - a int, - b int, - c int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@t_test -PREHOOK: query: INSERT INTO t_test VALUES -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(NULL, NULL, NULL), -(5, 2, 3), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@t_test -POSTHOOK: query: INSERT INTO t_test VALUES -(7, 8, 4), (7, 8, 4), (7, 8, 4), -(NULL, NULL, NULL), -(5, 2, 3), -(NULL, NULL, NULL), -(NULL, NULL, NULL), -(6, 2, 1), -(5, 1, 2), (5, 1, 2), (5, 1, 2), -(NULL, NULL, NULL) -POSTHOOK: type: QUERY -POSTHOOK: Input: 
_dummy_database@_dummy_table -POSTHOOK: Output: default@t_test -POSTHOOK: Lineage: t_test.a SCRIPT [] -POSTHOOK: Lineage: t_test.b SCRIPT [] -POSTHOOK: Lineage: t_test.c SCRIPT [] -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -7 3 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -7 3 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -NULL 0 -7 3 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS FIRST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS FIRST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -NULL 0 -7 3 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -7 3 -6 1 -PREHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS LAST LIMIT 2 -PREHOOK: type: QUERY -PREHOOK: Input: default@t_test -#### A masked pattern was here #### -POSTHOOK: query: SELECT a, count(b) FROM t_test GROUP BY a ORDER BY a DESC NULLS LAST LIMIT 2 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@t_test -#### A masked pattern was here #### -7 3 -6 1 -PREHOOK: query: DROP TABLE IF EXISTS t_test -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@t_test -PREHOOK: Output: default@t_test -POSTHOOK: query: DROP TABLE IF EXISTS t_test -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@t_test -POSTHOOK: Output: default@t_test diff --git ql/src/test/results/clientpositive/topnkey_windowing.q.out ql/src/test/results/clientpositive/topnkey_windowing.q.out deleted file mode 100644 index 9f64dcaeea..0000000000 --- ql/src/test/results/clientpositive/topnkey_windowing.q.out +++ /dev/null @@ -1,877 +0,0 @@ -PREHOOK: query: CREATE TABLE topnkey_windowing (tw_code string, tw_value double) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@topnkey_windowing -POSTHOOK: query: CREATE TABLE topnkey_windowing (tw_code string, tw_value double) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@topnkey_windowing -PREHOOK: query: INSERT INTO topnkey_windowing VALUES - (NULL, NULL), - (NULL, 109), - ('A', 109), - ('A', 104), - ('A', 109), - ('A', 109), - ('A', 103), - (NULL, NULL), - 
(NULL, 109), - ('A', 109), - ('A', 101), - ('A', 101), - ('A', 114), - ('A', 120), - ('B', 105), - ('B', 106), - ('B', 106), - ('B', NULL), - ('B', 106), - ('A', 107), - ('B', 108), - ('A', 102), - ('B', 110), - (NULL, NULL), - (NULL, 109), - ('A', 109) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@topnkey_windowing -POSTHOOK: query: INSERT INTO topnkey_windowing VALUES - (NULL, NULL), - (NULL, 109), - ('A', 109), - ('A', 104), - ('A', 109), - ('A', 109), - ('A', 103), - (NULL, NULL), - (NULL, 109), - ('A', 109), - ('A', 101), - ('A', 101), - ('A', 114), - ('A', 120), - ('B', 105), - ('B', 106), - ('B', 106), - ('B', NULL), - ('B', 106), - ('A', 107), - ('B', 108), - ('A', 102), - ('B', 110), - (NULL, NULL), - (NULL, 109), - ('A', 109) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@topnkey_windowing -POSTHOOK: Lineage: topnkey_windowing.tw_code SCRIPT [] -POSTHOOK: Lineage: topnkey_windowing.tw_value SCRIPT [] -PREHOOK: query: EXPLAIN -SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: topnkey_windowing - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: tw_code (type: string), tw_value (type: double) - null sort order: az - sort order: ++ - Map-reduce partition columns: tw_code (type: string) - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator - Function definitions: - Input definition - input alias: ptf_0 - output shape: _col0: string, _col1: double - type: WINDOWING - Windowing table definition - input alias: ptf_1 - name: windowingtablefunction - order by: _col1 ASC NULLS LAST - partition by: _col0 - raw input shape: - window functions: - window function definition - alias: rank_window_0 - arguments: _col1 - name: rank - window function: GenericUDAFRankEvaluator - window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) - isPivotResult: true - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (rank_window_0 <= 3) (type: boolean) - Statistics: Num rows: 8 Data size: 2625 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: string), rank_window_0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: 
COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -NULL 1 -NULL 1 -NULL 1 -A 1 -A 1 -A 3 -B 1 -B 2 -B 2 -B 2 -PREHOOK: query: EXPLAIN -SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: topnkey_windowing - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: tw_code (type: string), tw_value (type: double) - null sort order: az - sort order: ++ - Map-reduce partition columns: tw_code (type: string) - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Execution mode: vectorized - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator - Function definitions: - Input definition - input alias: ptf_0 - output shape: _col0: string, _col1: double - type: WINDOWING - Windowing table definition - input alias: ptf_1 - name: windowingtablefunction - order by: _col1 ASC NULLS LAST - partition by: _col0 - raw input shape: - window functions: - window function definition - alias: rank_window_0 - arguments: _col1 - name: rank - window function: GenericUDAFRankEvaluator - window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) - isPivotResult: true - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (rank_window_0 <= 3) (type: boolean) - Statistics: Num rows: 8 Data size: 2625 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: string), rank_window_0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -NULL 1 -NULL 1 -NULL 1 -A 1 -A 1 -A 3 -B 1 -B 2 -B 2 -B 2 -PREHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -NULL 1 -NULL 1 -NULL 1 -A 1 -A 1 -A 3 -B 1 -B 2 -B 2 -B 2 -PREHOOK: query: EXPLAIN extended -SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN extended -SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -OPTIMIZED SQL: SELECT * -FROM (SELECT `tw_code`, RANK() OVER (PARTITION BY 0 ORDER BY `tw_value` ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING) AS `rank_window_0` -FROM `default`.`topnkey_windowing`) AS `t` -WHERE `rank_window_0` <= 3 -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: topnkey_windowing - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Reduce Output Operator - key expressions: 0 (type: int), tw_value (type: double) - null sort order: az - sort order: ++ - Map-reduce partition columns: 0 (type: int) - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - TopN: 4 - TopN Hash Memory Usage: 0.1 - value expressions: tw_code (type: string) - auto parallelism: false - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: topnkey_windowing - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"tw_code":"true","tw_value":"true"}} - bucket_count -1 - bucketing_version 2 - 
column.name.delimiter , - columns tw_code,tw_value - columns.comments - columns.types string:double -#### A masked pattern was here #### - name default.topnkey_windowing - numFiles 1 - numRows 26 - rawDataSize 176 - serialization.ddl struct topnkey_windowing { string tw_code, double tw_value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 202 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"tw_code":"true","tw_value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns tw_code,tw_value - columns.comments - columns.types string:double -#### A masked pattern was here #### - name default.topnkey_windowing - numFiles 1 - numRows 26 - rawDataSize 176 - serialization.ddl struct topnkey_windowing { string tw_code, double tw_value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 202 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.topnkey_windowing - name: default.topnkey_windowing - Truncated Path -> Alias: - /topnkey_windowing [$hdt$_0:topnkey_windowing] - Needs Tagging: false - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator - Function definitions: - Input definition - input alias: ptf_0 - output shape: _col0: string, _col1: double - type: WINDOWING - Windowing table definition - input alias: ptf_1 - name: windowingtablefunction - order by: _col1 ASC NULLS LAST - partition by: 0 - raw input shape: - window functions: - window function definition - alias: rank_window_0 - arguments: _col1 - name: rank - window function: GenericUDAFRankEvaluator - window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) - isPivotResult: true - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - isSamplingPred: false - predicate: (rank_window_0 <= 3) (type: boolean) - Statistics: Num rows: 8 Data size: 2625 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: string), rank_window_0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string:int - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - 
-PREHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -A 1 -A 1 -A 3 -PREHOOK: query: EXPLAIN extended -SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN extended -SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -OPTIMIZED SQL: SELECT * -FROM (SELECT `tw_code`, RANK() OVER (PARTITION BY 0 ORDER BY `tw_value` ROWS BETWEEN 2147483647 FOLLOWING AND 2147483647 PRECEDING) AS `rank_window_0` -FROM `default`.`topnkey_windowing`) AS `t` -WHERE `rank_window_0` <= 3 -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: topnkey_windowing - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - GatherStats: false - Reduce Output Operator - key expressions: 0 (type: int), tw_value (type: double) - null sort order: az - sort order: ++ - Map-reduce partition columns: 0 (type: int) - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - tag: -1 - TopN: 4 - TopN Hash Memory Usage: 0.1 - value expressions: tw_code (type: string) - auto parallelism: false - Execution mode: vectorized - Path -> Alias: -#### A masked pattern was here #### - Path -> Partition: -#### A masked pattern was here #### - Partition - base file name: topnkey_windowing - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"tw_code":"true","tw_value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns tw_code,tw_value - columns.comments - columns.types string:double -#### A masked pattern was here #### - name default.topnkey_windowing - numFiles 1 - numRows 26 - rawDataSize 176 - serialization.ddl struct topnkey_windowing { string tw_code, double tw_value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 202 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - input format: org.apache.hadoop.mapred.TextInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat - properties: - COLUMN_STATS_ACCURATE {"BASIC_STATS":"true","COLUMN_STATS":{"tw_code":"true","tw_value":"true"}} - bucket_count -1 - bucketing_version 2 - column.name.delimiter , - columns tw_code,tw_value - columns.comments - columns.types string:double -#### A masked pattern was here #### - name default.topnkey_windowing - numFiles 1 - numRows 26 
- rawDataSize 176 - serialization.ddl struct topnkey_windowing { string tw_code, double tw_value} - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - totalSize 202 -#### A masked pattern was here #### - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - name: default.topnkey_windowing - name: default.topnkey_windowing - Truncated Path -> Alias: - /topnkey_windowing [$hdt$_0:topnkey_windowing] - Needs Tagging: false - Reduce Operator Tree: - Select Operator - expressions: VALUE._col0 (type: string), KEY.reducesinkkey1 (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator - Function definitions: - Input definition - input alias: ptf_0 - output shape: _col0: string, _col1: double - type: WINDOWING - Windowing table definition - input alias: ptf_1 - name: windowingtablefunction - order by: _col1 ASC NULLS LAST - partition by: 0 - raw input shape: - window functions: - window function definition - alias: rank_window_0 - arguments: _col1 - name: rank - window function: GenericUDAFRankEvaluator - window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) - isPivotResult: true - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - isSamplingPred: false - predicate: (rank_window_0 <= 3) (type: boolean) - Statistics: Num rows: 8 Data size: 2625 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: string), rank_window_0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - GlobalTableId: 0 -#### A masked pattern was here #### - NumFilesPerFileSink: 1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE -#### A masked pattern was here #### - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - properties: - columns _col0,_col1 - columns.types string:int - escape.delim \ - hive.serialization.extend.additional.nesting.levels true - serialization.escape.crlf true - serialization.format 1 - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - TotalFiles: 1 - GatherStats: false - MultiFileSpray: false - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -A 1 -A 1 -A 3 -PREHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code as tw_code, - rank() OVER (ORDER BY tw_value) AS ranking - FROM 
topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -A 1 -A 1 -A 3 -PREHOOK: query: EXPLAIN -SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - dense_rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - dense_rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: topnkey_windowing - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: tw_code (type: string), tw_value (type: double) - null sort order: az - sort order: ++ - Map-reduce partition columns: tw_code (type: string) - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Execution mode: vectorized - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: double) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator - Function definitions: - Input definition - input alias: ptf_0 - output shape: _col0: string, _col1: double - type: WINDOWING - Windowing table definition - input alias: ptf_1 - name: windowingtablefunction - order by: _col1 ASC NULLS LAST - partition by: _col0 - raw input shape: - window functions: - window function definition - alias: dense_rank_window_0 - arguments: _col1 - name: dense_rank - window function: GenericUDAFDenseRankEvaluator - window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) - isPivotResult: true - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (dense_rank_window_0 <= 3) (type: boolean) - Statistics: Num rows: 8 Data size: 2625 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: string), dense_rank_window_0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - dense_rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - dense_rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE 
ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -NULL 1 -NULL 1 -NULL 1 -NULL 2 -NULL 2 -NULL 2 -A 1 -A 1 -A 2 -A 3 -B 1 -B 2 -B 2 -B 2 -B 3 -PREHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - dense_rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_code, ranking -FROM ( - SELECT tw_code AS tw_code, - dense_rank() OVER (PARTITION BY tw_code ORDER BY tw_value) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -NULL 1 -NULL 1 -NULL 1 -NULL 2 -NULL 2 -NULL 2 -A 1 -A 1 -A 2 -A 3 -B 1 -B 2 -B 2 -B 2 -B 3 -PREHOOK: query: DROP TABLE topnkey_windowing -PREHOOK: type: DROPTABLE -PREHOOK: Input: default@topnkey_windowing -PREHOOK: Output: default@topnkey_windowing -POSTHOOK: query: DROP TABLE topnkey_windowing -POSTHOOK: type: DROPTABLE -POSTHOOK: Input: default@topnkey_windowing -POSTHOOK: Output: default@topnkey_windowing diff --git ql/src/test/results/clientpositive/topnkey_windowing_order.q.out ql/src/test/results/clientpositive/topnkey_windowing_order.q.out deleted file mode 100644 index 7a9a67d001..0000000000 --- ql/src/test/results/clientpositive/topnkey_windowing_order.q.out +++ /dev/null @@ -1,517 +0,0 @@ -PREHOOK: query: CREATE TABLE topnkey_windowing (tw_a string, tw_b string, tw_v1 double, tw_v2 double) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@topnkey_windowing -POSTHOOK: query: CREATE TABLE topnkey_windowing (tw_a string, tw_b string, tw_v1 double, tw_v2 double) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@topnkey_windowing -PREHOOK: query: INSERT INTO topnkey_windowing VALUES - (NULL, NULL, NULL, NULL), - (NULL, 'D', 109, 9), - ('A', 'D', 109, 9), - ('A', 'D', 104, 9), - ('A', 'D', 109, 9), - ('A', 'C', 109, 9), - ('A', 'C', 103, 9), - (NULL, NULL, NULL, NULL), - (NULL, 'D', 109, 9), - ('A', 'D', 109, 9), - ('A', 'D', 101, 9), - ('A', 'D', 101, 9), - ('A', 'D', 114, 9), - ('A', 'D', 120, 9), - ('B', 'E', 105, 9), - ('B', 'E', 106, 9), - ('B', 'E', 106, 9), - ('B', 'E', NULL, NULL), - ('B', 'E', 106, 9), - ('A', 'C', 107, 9), - ('B', 'E', 108, 9), - ('A', 'C', 102, 9), - ('B', 'E', 110, 9), - (NULL, NULL, NULL, NULL), - (NULL, NULL, 109, 9), - ('A', 'D', 109, 9) -PREHOOK: type: QUERY -PREHOOK: Input: _dummy_database@_dummy_table -PREHOOK: Output: default@topnkey_windowing -POSTHOOK: query: INSERT INTO topnkey_windowing VALUES - (NULL, NULL, NULL, NULL), - (NULL, 'D', 109, 9), - ('A', 'D', 109, 9), - ('A', 'D', 104, 9), - ('A', 'D', 109, 9), - ('A', 'C', 109, 9), - ('A', 'C', 103, 9), - (NULL, NULL, NULL, NULL), - (NULL, 'D', 109, 9), - ('A', 'D', 109, 9), - ('A', 'D', 101, 9), - ('A', 'D', 101, 9), - ('A', 'D', 114, 9), - ('A', 'D', 120, 9), - ('B', 'E', 105, 9), - ('B', 'E', 106, 9), - ('B', 'E', 106, 9), - ('B', 'E', NULL, NULL), - ('B', 'E', 106, 9), - ('A', 'C', 107, 9), - ('B', 'E', 108, 9), - ('A', 'C', 102, 9), - ('B', 'E', 110, 9), - (NULL, NULL, NULL, NULL), - (NULL, NULL, 109, 9), - ('A', 'D', 109, 9) -POSTHOOK: type: QUERY -POSTHOOK: Input: _dummy_database@_dummy_table -POSTHOOK: Output: default@topnkey_windowing -POSTHOOK: Lineage: topnkey_windowing.tw_a SCRIPT [] -POSTHOOK: 
Lineage: topnkey_windowing.tw_b SCRIPT [] -POSTHOOK: Lineage: topnkey_windowing.tw_v1 SCRIPT [] -POSTHOOK: Lineage: topnkey_windowing.tw_v2 SCRIPT [] -PREHOOK: query: EXPLAIN -SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: topnkey_windowing - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: tw_a (type: string), tw_v1 (type: double) - null sort order: aa - sort order: ++ - Map-reduce partition columns: tw_a (type: string) - Statistics: Num rows: 26 Data size: 1969 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: double) - outputColumnNames: _col0, _col2 - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator - Function definitions: - Input definition - input alias: ptf_0 - output shape: _col0: string, _col2: double - type: WINDOWING - Windowing table definition - input alias: ptf_1 - name: windowingtablefunction - order by: _col2 ASC NULLS FIRST - partition by: _col0 - raw input shape: - window functions: - window function definition - alias: rank_window_0 - arguments: _col2 - name: rank - window function: GenericUDAFRankEvaluator - window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) - isPivotResult: true - Statistics: Num rows: 26 Data size: 8937 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (rank_window_0 <= 3) (type: boolean) - Statistics: Num rows: 8 Data size: 2625 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: string), rank_window_0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### 
-NULL 1 -NULL 1 -NULL 1 -A 1 -A 1 -A 3 -B 1 -B 2 -B 3 -B 3 -B 3 -PREHOOK: query: SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -NULL 1 -NULL 1 -NULL 1 -A 1 -A 1 -A 3 -B 1 -B 2 -B 3 -B 3 -B 3 -PREHOOK: query: EXPLAIN -SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 ASC NULLS LAST, tw_v2 DESC NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 ASC NULLS LAST, tw_v2 DESC NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: topnkey_windowing - Statistics: Num rows: 26 Data size: 2153 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: tw_a (type: string), tw_v1 (type: double), tw_v2 (type: double) - null sort order: aza - sort order: ++- - Map-reduce partition columns: tw_a (type: string) - Statistics: Num rows: 26 Data size: 2153 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: double), KEY.reducesinkkey2 (type: double) - outputColumnNames: _col0, _col2, _col3 - Statistics: Num rows: 26 Data size: 9121 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator - Function definitions: - Input definition - input alias: ptf_0 - output shape: _col0: string, _col2: double, _col3: double - type: WINDOWING - Windowing table definition - input alias: ptf_1 - name: windowingtablefunction - order by: _col2 ASC NULLS LAST, _col3 DESC NULLS FIRST - partition by: _col0 - raw input shape: - window functions: - window function definition - alias: rank_window_0 - arguments: _col2, _col3 - name: rank - window function: GenericUDAFRankEvaluator - window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) - isPivotResult: true - Statistics: Num rows: 26 Data size: 9121 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (rank_window_0 <= 3) (type: boolean) - Statistics: Num rows: 8 Data size: 2681 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: string), rank_window_0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-0 - Fetch Operator - limit: -1 - Processor Tree: - ListSink - -PREHOOK: query: SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 ASC NULLS LAST, tw_v2 DESC NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 ASC NULLS LAST, tw_v2 DESC NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -NULL 1 -NULL 1 -NULL 1 -A 1 -A 1 -A 3 -B 1 -B 2 -B 2 -B 2 -PREHOOK: query: SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 ASC NULLS LAST, tw_v2 DESC NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a ORDER BY tw_v1 ASC NULLS LAST, tw_v2 DESC NULLS FIRST) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -NULL 1 -NULL 1 -NULL 1 -A 1 -A 1 -A 3 -B 1 -B 2 -B 2 -B 2 -PREHOOK: query: EXPLAIN -SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a, tw_b ORDER BY tw_v1) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -PREHOOK: type: QUERY -PREHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -POSTHOOK: query: EXPLAIN -SELECT tw_a, ranking -FROM ( - SELECT tw_a AS tw_a, - rank() OVER (PARTITION BY tw_a, tw_b ORDER BY tw_v1) AS ranking - FROM topnkey_windowing) tmp1 - WHERE ranking <= 3 -POSTHOOK: type: QUERY -POSTHOOK: Input: default@topnkey_windowing -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-1 is a root stage - Stage-0 depends on stages: Stage-1 - -STAGE PLANS: - Stage: Stage-1 - Map Reduce - Map Operator Tree: - TableScan - alias: topnkey_windowing - Statistics: Num rows: 26 Data size: 3924 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: tw_a (type: string), tw_b (type: string), tw_v1 (type: double) - null sort order: aaz - sort order: +++ - Map-reduce partition columns: tw_a (type: string), tw_b (type: string) - Statistics: Num rows: 26 Data size: 3924 Basic stats: COMPLETE Column stats: COMPLETE - TopN Hash Memory Usage: 0.1 - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), KEY.reducesinkkey2 (type: double) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 26 Data size: 10892 Basic stats: COMPLETE Column stats: COMPLETE - PTF Operator - Function definitions: - Input definition - input alias: ptf_0 - output shape: _col0: string, _col1: string, _col2: double - type: WINDOWING - Windowing table definition - input alias: ptf_1 - name: windowingtablefunction - order by: _col2 ASC NULLS LAST - partition by: _col0, _col1 - raw input shape: - window functions: - window function definition - alias: rank_window_0 - arguments: _col2 - name: rank - window 
function: GenericUDAFRankEvaluator
-                        window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX)
-                        isPivotResult: true
-            Statistics: Num rows: 26 Data size: 10892 Basic stats: COMPLETE Column stats: COMPLETE
-            Filter Operator
-              predicate: (rank_window_0 <= 3) (type: boolean)
-              Statistics: Num rows: 8 Data size: 3220 Basic stats: COMPLETE Column stats: COMPLETE
-              Select Operator
-                expressions: _col0 (type: string), rank_window_0 (type: int)
-                outputColumnNames: _col0, _col1
-                Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE
-                File Output Operator
-                  compressed: false
-                  Statistics: Num rows: 8 Data size: 457 Basic stats: COMPLETE Column stats: COMPLETE
-                  table:
-                      input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                      output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: -1
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT tw_a, ranking
-FROM (
-  SELECT tw_a AS tw_a,
-    rank() OVER (PARTITION BY tw_a, tw_b ORDER BY tw_v1) AS ranking
-  FROM topnkey_windowing) tmp1
-  WHERE ranking <= 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@topnkey_windowing
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT tw_a, ranking
-FROM (
-  SELECT tw_a AS tw_a,
-    rank() OVER (PARTITION BY tw_a, tw_b ORDER BY tw_v1) AS ranking
-  FROM topnkey_windowing) tmp1
-  WHERE ranking <= 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@topnkey_windowing
-#### A masked pattern was here ####
-NULL 1
-NULL 2
-NULL 2
-NULL 2
-NULL 1
-NULL 1
-A 1
-A 2
-A 3
-A 1
-A 1
-A 3
-B 1
-B 2
-B 2
-B 2
-PREHOOK: query: SELECT tw_a, ranking
-FROM (
-  SELECT tw_a AS tw_a,
-    rank() OVER (PARTITION BY tw_a, tw_b ORDER BY tw_v1) AS ranking
-  FROM topnkey_windowing) tmp1
-  WHERE ranking <= 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@topnkey_windowing
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT tw_a, ranking
-FROM (
-  SELECT tw_a AS tw_a,
-    rank() OVER (PARTITION BY tw_a, tw_b ORDER BY tw_v1) AS ranking
-  FROM topnkey_windowing) tmp1
-  WHERE ranking <= 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@topnkey_windowing
-#### A masked pattern was here ####
-NULL 1
-NULL 2
-NULL 2
-NULL 2
-NULL 1
-NULL 1
-A 1
-A 2
-A 3
-A 1
-A 1
-A 3
-B 1
-B 2
-B 2
-B 2
-PREHOOK: query: DROP TABLE topnkey_windowing
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@topnkey_windowing
-PREHOOK: Output: default@topnkey_windowing
-POSTHOOK: query: DROP TABLE topnkey_windowing
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@topnkey_windowing
-POSTHOOK: Output: default@topnkey_windowing
diff --git ql/src/test/results/clientpositive/vector_topnkey.q.out ql/src/test/results/clientpositive/vector_topnkey.q.out
deleted file mode 100644
index 1830eaecf4..0000000000
--- ql/src/test/results/clientpositive/vector_topnkey.q.out
+++ /dev/null
@@ -1,283 +0,0 @@
-PREHOOK: query: CREATE TABLE t_test(
-  cint1 int,
-  cint2 int,
-  cdouble double,
-  cvarchar varchar(50),
-  cdecimal1 decimal(10,2),
-  cdecimal2 decimal(38,5)
-)
-PREHOOK: type: CREATETABLE
-PREHOOK: Output: database:default
-PREHOOK: Output: default@t_test
-POSTHOOK: query: CREATE TABLE t_test(
-  cint1 int,
-  cint2 int,
-  cdouble double,
-  cvarchar varchar(50),
-  cdecimal1 decimal(10,2),
-  cdecimal2 decimal(38,5)
-)
-POSTHOOK: type: CREATETABLE
-POSTHOOK: Output: database:default
-POSTHOOK: Output: default@t_test
-PREHOOK: query: INSERT INTO t_test VALUES
-(NULL, NULL, NULL, NULL, NULL, NULL),
-(8, 9, 2.0, 'one', 2.0, 2.0), (8, 9, 2.0, 'one', 2.0, 2.0),
-(4, 2, 3.3, 'two', 3.3, 3.3),
-(NULL, NULL, NULL, NULL, NULL, NULL),
-(NULL, NULL, NULL, NULL, NULL, NULL),
-(6, 2, 1.8, 'three', 1.8, 1.8),
-(7, 8, 4.5, 'four', 4.5, 4.5), (7, 8, 4.5, 'four', 4.5, 4.5), (7, 8, 4.5, 'four', 4.5, 4.5),
-(4, 1, 2.0, 'five', 2.0, 2.0), (4, 1, 2.0, 'five', 2.0, 2.0), (4, 1, 2.0, 'five', 2.0, 2.0),
-(NULL, NULL, NULL, NULL, NULL, NULL)
-PREHOOK: type: QUERY
-PREHOOK: Input: _dummy_database@_dummy_table
-PREHOOK: Output: default@t_test
-POSTHOOK: query: INSERT INTO t_test VALUES
-(NULL, NULL, NULL, NULL, NULL, NULL),
-(8, 9, 2.0, 'one', 2.0, 2.0), (8, 9, 2.0, 'one', 2.0, 2.0),
-(4, 2, 3.3, 'two', 3.3, 3.3),
-(NULL, NULL, NULL, NULL, NULL, NULL),
-(NULL, NULL, NULL, NULL, NULL, NULL),
-(6, 2, 1.8, 'three', 1.8, 1.8),
-(7, 8, 4.5, 'four', 4.5, 4.5), (7, 8, 4.5, 'four', 4.5, 4.5), (7, 8, 4.5, 'four', 4.5, 4.5),
-(4, 1, 2.0, 'five', 2.0, 2.0), (4, 1, 2.0, 'five', 2.0, 2.0), (4, 1, 2.0, 'five', 2.0, 2.0),
-(NULL, NULL, NULL, NULL, NULL, NULL)
-POSTHOOK: type: QUERY
-POSTHOOK: Input: _dummy_database@_dummy_table
-POSTHOOK: Output: default@t_test
-POSTHOOK: Lineage: t_test.cdecimal1 SCRIPT []
-POSTHOOK: Lineage: t_test.cdecimal2 SCRIPT []
-POSTHOOK: Lineage: t_test.cdouble SCRIPT []
-POSTHOOK: Lineage: t_test.cint1 SCRIPT []
-POSTHOOK: Lineage: t_test.cint2 SCRIPT []
-POSTHOOK: Lineage: t_test.cvarchar SCRIPT []
-PREHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT cint1 FROM t_test GROUP BY cint1 ORDER BY cint1 LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_test
-#### A masked pattern was here ####
-POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL
-SELECT cint1 FROM t_test GROUP BY cint1 ORDER BY cint1 LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_test
-#### A masked pattern was here ####
-PLAN VECTORIZATION:
-  enabled: true
-  enabledConditionsMet: [hive.vectorized.execution.enabled IS true]
-
-STAGE DEPENDENCIES:
-  Stage-1 is a root stage
-  Stage-2 depends on stages: Stage-1
-  Stage-0 depends on stages: Stage-2
-
-STAGE PLANS:
-  Stage: Stage-1
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            alias: t_test
-            Statistics: Num rows: 14 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-            TableScan Vectorization:
-                native: true
-                vectorizationSchemaColumns: [0:cint1:int, 1:cint2:int, 2:cdouble:double, 3:cvarchar:varchar(50), 4:cdecimal1:decimal(10,2)/DECIMAL_64, 5:cdecimal2:decimal(38,5), 6:ROW__ID:struct]
-            Select Operator
-              expressions: cint1 (type: int)
-              outputColumnNames: cint1
-              Select Vectorization:
-                  className: VectorSelectOperator
-                  native: true
-                  projectedOutputColumnNums: [0]
-              Statistics: Num rows: 14 Data size: 44 Basic stats: COMPLETE Column stats: COMPLETE
-              Group By Operator
-                Group By Vectorization:
-                    className: VectorGroupByOperator
-                    groupByMode: HASH
-                    keyExpressions: col 0:int
-                    native: false
-                    vectorProcessingMode: HASH
-                    projectedOutputColumnNums: []
-                keys: cint1 (type: int)
-                minReductionHashAggr: 0.99
-                mode: hash
-                outputColumnNames: _col0
-                Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                Reduce Output Operator
-                  key expressions: _col0 (type: int)
-                  null sort order: z
-                  sort order: +
-                  Map-reduce partition columns: _col0 (type: int)
-                  Reduce Sink Vectorization:
-                      className: VectorReduceSinkOperator
-                      native: false
-                      nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                      nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-                  Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-                  TopN Hash Memory Usage: 0.1
-      Execution mode: vectorized
-      Map Vectorization:
-          enabled: true
-          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-          inputFormatFeatureSupport: [DECIMAL_64]
-          featureSupportInUse: [DECIMAL_64]
-          inputFileFormats: org.apache.hadoop.mapred.TextInputFormat
-          allNative: false
-          usesVectorUDFAdaptor: false
-          vectorized: true
-          rowBatchContext:
-              dataColumnCount: 6
-              includeColumns: [0]
-              dataColumns: cint1:int, cint2:int, cdouble:double, cvarchar:varchar(50), cdecimal1:decimal(10,2)/DECIMAL_64, cdecimal2:decimal(38,5)
-              partitionColumnCount: 0
-              scratchColumnTypeNames: []
-      Reduce Vectorization:
-          enabled: false
-          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
-          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-      Reduce Operator Tree:
-        Group By Operator
-          keys: KEY._col0 (type: int)
-          mode: mergepartial
-          outputColumnNames: _col0
-          Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-          File Output Operator
-            compressed: false
-            table:
-                input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
-
-  Stage: Stage-2
-    Map Reduce
-      Map Operator Tree:
-          TableScan
-            TableScan Vectorization:
-                native: true
-                vectorizationSchemaColumns: [0:_col0:int]
-            Reduce Output Operator
-              key expressions: _col0 (type: int)
-              null sort order: z
-              sort order: +
-              Reduce Sink Vectorization:
-                  className: VectorReduceSinkOperator
-                  native: false
-                  nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true
-                  nativeConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-              Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-              TopN Hash Memory Usage: 0.1
-      Execution mode: vectorized
-      Map Vectorization:
-          enabled: true
-          enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true
-          inputFormatFeatureSupport: []
-          featureSupportInUse: []
-          inputFileFormats: org.apache.hadoop.mapred.SequenceFileInputFormat
-          allNative: false
-          usesVectorUDFAdaptor: false
-          vectorized: true
-          rowBatchContext:
-              dataColumnCount: 1
-              includeColumns: [0]
-              dataColumns: _col0:int
-              partitionColumnCount: 0
-              scratchColumnTypeNames: []
-      Reduce Vectorization:
-          enabled: false
-          enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true
-          enableConditionsNotMet: hive.execution.engine mr IN [tez, spark] IS false
-      Reduce Operator Tree:
-        Select Operator
-          expressions: KEY.reducesinkkey0 (type: int)
-          outputColumnNames: _col0
-          Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE
-          Limit
-            Number of rows: 3
-            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
-            File Output Operator
-              compressed: false
-              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE
-              table:
-                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
-                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
-                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
-
-  Stage: Stage-0
-    Fetch Operator
-      limit: 3
-      Processor Tree:
-        ListSink
-
-PREHOOK: query: SELECT cint1 FROM t_test GROUP BY cint1 ORDER BY cint1 LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_test
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cint1 FROM t_test GROUP BY cint1 ORDER BY cint1 LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_test
-#### A masked pattern was here ####
-4
-6
-7
-PREHOOK: query: SELECT cint1, cint2 FROM t_test GROUP BY cint1, cint2 ORDER BY cint1, cint2 LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_test
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cint1, cint2 FROM t_test GROUP BY cint1, cint2 ORDER BY cint1, cint2 LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_test
-#### A masked pattern was here ####
-4 1
-4 2
-6 2
-PREHOOK: query: SELECT cint1, cint2 FROM t_test GROUP BY cint1, cint2 ORDER BY cint1 DESC, cint2 LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_test
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cint1, cint2 FROM t_test GROUP BY cint1, cint2 ORDER BY cint1 DESC, cint2 LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_test
-#### A masked pattern was here ####
-8 9
-7 8
-6 2
-PREHOOK: query: SELECT cint1, cdouble FROM t_test GROUP BY cint1, cdouble ORDER BY cint1, cdouble LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_test
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cint1, cdouble FROM t_test GROUP BY cint1, cdouble ORDER BY cint1, cdouble LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_test
-#### A masked pattern was here ####
-4 2.0
-4 3.3
-6 1.8
-PREHOOK: query: SELECT cvarchar, cdouble FROM t_test GROUP BY cvarchar, cdouble ORDER BY cvarchar, cdouble LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_test
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cvarchar, cdouble FROM t_test GROUP BY cvarchar, cdouble ORDER BY cvarchar, cdouble LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_test
-#### A masked pattern was here ####
-five 2.0
-four 4.5
-one 2.0
-PREHOOK: query: SELECT cdecimal1, cdecimal2 FROM t_test GROUP BY cdecimal1, cdecimal2 ORDER BY cdecimal1, cdecimal2 LIMIT 3
-PREHOOK: type: QUERY
-PREHOOK: Input: default@t_test
-#### A masked pattern was here ####
-POSTHOOK: query: SELECT cdecimal1, cdecimal2 FROM t_test GROUP BY cdecimal1, cdecimal2 ORDER BY cdecimal1, cdecimal2 LIMIT 3
-POSTHOOK: type: QUERY
-POSTHOOK: Input: default@t_test
-#### A masked pattern was here ####
-1.80 1.80000
-2.00 2.00000
-3.30 3.30000
-PREHOOK: query: DROP TABLE t_test
-PREHOOK: type: DROPTABLE
-PREHOOK: Input: default@t_test
-PREHOOK: Output: default@t_test
-POSTHOOK: query: DROP TABLE t_test
-POSTHOOK: type: DROPTABLE
-POSTHOOK: Input: default@t_test
-POSTHOOK: Output: default@t_test