diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java index 5364ca6..27fd66d 100644 --- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java +++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java @@ -384,8 +384,11 @@ public void handle(Signal signal) { try { int lastRet = 0, ret = 0; + // we can not use "split" function directly as ";" may be quoted + List<String> commands = splitSemiColon(line); + String command = ""; - for (String oneCmd : line.split(";")) { + for (String oneCmd : commands) { if (StringUtils.endsWith(oneCmd, "\\")) { command += StringUtils.chop(oneCmd) + ";"; @@ -415,6 +418,47 @@ public void handle(Signal signal) { } } } + + public static List<String> splitSemiColon(String line) { + boolean insideSingleQuote = false; + boolean insideDoubleQuote = false; + boolean escape = false; + int beginIndex = 0; + List<String> ret = new ArrayList<>(); + for (int index = 0; index < line.length(); index++) { + if (line.charAt(index) == '\'') { + // take a look to see if it is escaped + if (!escape) { + // flip the boolean variable + insideSingleQuote = !insideSingleQuote; + } + } else if (line.charAt(index) == '\"') { + // take a look to see if it is escaped + if (!escape) { + // flip the boolean variable + insideDoubleQuote = !insideDoubleQuote; + } + } else if (line.charAt(index) == ';') { + if (insideSingleQuote || insideDoubleQuote) { + // do not split + } else { + // split, do not include ; itself + ret.add(line.substring(beginIndex, index)); + beginIndex = index + 1; + } + } else { + // nothing to do + } + // set the escape + if (escape) { + escape = false; + } else if (line.charAt(index) == '\\') { + escape = true; + } + } + ret.add(line.substring(beginIndex)); + return ret; + } public int processReader(BufferedReader r) throws IOException { String line; diff --git a/hbase-handler/src/test/results/positive/hbase_binary_external_table_queries.q.out 
b/hbase-handler/src/test/results/positive/hbase_binary_external_table_queries.q.out index 2f43030..7eaba6b 100644 --- a/hbase-handler/src/test/results/positive/hbase_binary_external_table_queries.q.out +++ b/hbase-handler/src/test/results/positive/hbase_binary_external_table_queries.q.out @@ -117,13 +117,11 @@ POSTHOOK: Input: default@t_ext_hbase_3 key-1 true -128 -32768 -2147483648 -9223372036854775808 Hadoop, HBase, 1.4E-45 4.9E-324 key-2 false -1 -1 -1 -1 Hive -1.0 -1.0 key-3 true 127 32767 2147483647 9223372036854775807 Test Strings 3.4028235E38 1.7976931348623157E308 -PREHOOK: query: --HIVE-2958 -SELECT c_int, count(*) FROM t_ext_hbase_3 GROUP BY c_int +PREHOOK: query: SELECT c_int, count(*) FROM t_ext_hbase_3 GROUP BY c_int PREHOOK: type: QUERY PREHOOK: Input: default@t_ext_hbase_3 #### A masked pattern was here #### -POSTHOOK: query: --HIVE-2958 -SELECT c_int, count(*) FROM t_ext_hbase_3 GROUP BY c_int +POSTHOOK: query: SELECT c_int, count(*) FROM t_ext_hbase_3 GROUP BY c_int POSTHOOK: type: QUERY POSTHOOK: Input: default@t_ext_hbase_3 #### A masked pattern was here #### diff --git a/hbase-handler/src/test/results/positive/hbase_bulk.q.out b/hbase-handler/src/test/results/positive/hbase_bulk.q.out index 0dc5802..f38b7ec 100644 --- a/hbase-handler/src/test/results/positive/hbase_bulk.q.out +++ b/hbase-handler/src/test/results/positive/hbase_bulk.q.out @@ -6,9 +6,7 @@ PREHOOK: query: drop table hbpartition PREHOOK: type: DROPTABLE POSTHOOK: query: drop table hbpartition POSTHOOK: type: DROPTABLE -PREHOOK: query: -- this is a dummy table used for controlling how the HFiles are --- created -create table hbsort(key string, val string, val2 string) +PREHOOK: query: create table hbsort(key string, val string, val2 string) stored as INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.hbase.HiveHFileOutputFormat' @@ -16,9 +14,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.hbase.HiveHFileOutputFormat' PREHOOK: type: CREATETABLE 
PREHOOK: Output: database:default PREHOOK: Output: default@hbsort -POSTHOOK: query: -- this is a dummy table used for controlling how the HFiles are --- created -create table hbsort(key string, val string, val2 string) +POSTHOOK: query: create table hbsort(key string, val string, val2 string) stored as INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.hbase.HiveHFileOutputFormat' @@ -26,9 +22,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.hbase.HiveHFileOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@hbsort -PREHOOK: query: -- this is a dummy table used for controlling how the input file --- for TotalOrderPartitioner is created -create table hbpartition(part_break string) +PREHOOK: query: create table hbpartition(part_break string) row format serde 'org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe' stored as @@ -41,9 +35,7 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@hbpartition -POSTHOOK: query: -- this is a dummy table used for controlling how the input file --- for TotalOrderPartitioner is created -create table hbpartition(part_break string) +POSTHOOK: query: create table hbpartition(part_break string) row format serde 'org.apache.hadoop.hive.serde2.binarysortable.BinarySortableSerDe' stored as @@ -56,20 +48,14 @@ POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default POSTHOOK: Output: default@hbpartition -PREHOOK: query: -- this should produce one file, but we do not --- know what it will be called, so we will copy it to a well known -#### A masked pattern was here #### -insert overwrite table hbpartition +PREHOOK: query: insert overwrite table hbpartition select distinct value from src where value='val_100' or value='val_200' PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@hbpartition -POSTHOOK: query: -- 
this should produce one file, but we do not --- know what it will be called, so we will copy it to a well known -#### A masked pattern was here #### -insert overwrite table hbpartition +POSTHOOK: query: insert overwrite table hbpartition select distinct value from src where value='val_100' or value='val_200' @@ -78,8 +64,7 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@hbpartition POSTHOOK: Lineage: hbpartition.part_break SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] #### A masked pattern was here #### --- include some trailing blanks and nulls to make sure we handle them correctly -insert overwrite table hbsort +PREHOOK: query: insert overwrite table hbsort select distinct value, case when key=103 then cast(null as string) else key end, case when key=103 then '' @@ -89,9 +74,7 @@ cluster by value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@hbsort -#### A masked pattern was here #### --- include some trailing blanks and nulls to make sure we handle them correctly -insert overwrite table hbsort +POSTHOOK: query: insert overwrite table hbsort select distinct value, case when key=103 then cast(null as string) else key end, case when key=103 then '' @@ -105,21 +88,11 @@ POSTHOOK: Lineage: hbsort.key SIMPLE [(src)src.FieldSchema(name:value, type:stri POSTHOOK: Lineage: hbsort.val EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: hbsort.val2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] #### A masked pattern was here #### -PREHOOK: query: -- To get the files out to your local filesystem for loading into -#### A masked pattern was here #### --- semicolon-terminate the line below before running this test: -#### A masked pattern was here #### - -drop table hbsort +PREHOOK: query: drop table hbsort PREHOOK: type: DROPTABLE PREHOOK: Input: default@hbsort PREHOOK: Output: default@hbsort -POSTHOOK: query: -- To get the files out to your 
local filesystem for loading into -#### A masked pattern was here #### --- semicolon-terminate the line below before running this test: -#### A masked pattern was here #### - -drop table hbsort +POSTHOOK: query: drop table hbsort POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@hbsort POSTHOOK: Output: default@hbsort diff --git a/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out b/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out index a0f5183..b1258e3 100644 --- a/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out +++ b/hbase-handler/src/test/results/positive/hbase_custom_key2.q.out @@ -36,13 +36,11 @@ value POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@hbase_ck_4 -PREHOOK: query: -- 165,238,27,311,86 -select * from hbase_ck_4 +PREHOOK: query: select * from hbase_ck_4 PREHOOK: type: QUERY PREHOOK: Input: default@hbase_ck_4 #### A masked pattern was here #### -POSTHOOK: query: -- 165,238,27,311,86 -select * from hbase_ck_4 +POSTHOOK: query: select * from hbase_ck_4 POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_ck_4 #### A masked pattern was here #### @@ -51,12 +49,10 @@ POSTHOOK: Input: default@hbase_ck_4 {"col1":"27","col2":"1027","col3":"2027"} val_27 {"col1":"311","col2":"1311","col3":"2311"} val_311 {"col1":"86","col2":"1086","col3":"2086"} val_86 -PREHOOK: query: -- 238 -explain +PREHOOK: query: explain select * from hbase_ck_4 where key.col1 = '238' AND key.col2 = '1238' PREHOOK: type: QUERY -POSTHOOK: query: -- 238 -explain +POSTHOOK: query: explain select * from hbase_ck_4 where key.col1 = '238' AND key.col2 = '1238' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -89,12 +85,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_ck_4 #### A masked pattern was here #### {"col1":"238","col2":"1238","col3":"2238"} val_238 -PREHOOK: query: -- 165,238 -explain +PREHOOK: query: explain select * from hbase_ck_4 where key.col1 >= '165' AND key.col1 < '27' PREHOOK: type: QUERY 
-POSTHOOK: query: -- 165,238 -explain +POSTHOOK: query: explain select * from hbase_ck_4 where key.col1 >= '165' AND key.col1 < '27' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -128,12 +122,10 @@ POSTHOOK: Input: default@hbase_ck_4 #### A masked pattern was here #### {"col1":"165","col2":"1165","col3":"2165"} val_165 {"col1":"238","col2":"1238","col3":"2238"} val_238 -PREHOOK: query: -- 238,311 -explain +PREHOOK: query: explain select * from hbase_ck_4 where key.col1 > '100' AND key.col2 >= '1238' PREHOOK: type: QUERY -POSTHOOK: query: -- 238,311 -explain +POSTHOOK: query: explain select * from hbase_ck_4 where key.col1 > '100' AND key.col2 >= '1238' POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out b/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out index 25032cc..b573d4b 100644 --- a/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out +++ b/hbase-handler/src/test/results/positive/hbase_custom_key3.q.out @@ -36,13 +36,11 @@ value POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@hbase_ck_5 -PREHOOK: query: -- 165,238,27,311,86 -select * from hbase_ck_5 +PREHOOK: query: select * from hbase_ck_5 PREHOOK: type: QUERY PREHOOK: Input: default@hbase_ck_5 #### A masked pattern was here #### -POSTHOOK: query: -- 165,238,27,311,86 -select * from hbase_ck_5 +POSTHOOK: query: select * from hbase_ck_5 POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_ck_5 #### A masked pattern was here #### @@ -51,12 +49,10 @@ POSTHOOK: Input: default@hbase_ck_5 {"col1":"27","col2":"1027","col3":"2027"} val_27 {"col1":"311","col2":"1311","col3":"2311"} val_311 {"col1":"86","col2":"1086","col3":"2086"} val_86 -PREHOOK: query: -- 238 -explain +PREHOOK: query: explain select * from hbase_ck_5 where key.col1 = '238' AND key.col2 = '1238' PREHOOK: type: QUERY -POSTHOOK: query: -- 238 -explain +POSTHOOK: query: explain select * from hbase_ck_5 where key.col1 = '238' AND 
key.col2 = '1238' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -89,12 +85,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_ck_5 #### A masked pattern was here #### {"col1":"238","col2":"1238","col3":"2238"} val_238 -PREHOOK: query: -- 165,238 -explain +PREHOOK: query: explain select * from hbase_ck_5 where key.col1 >= '165' AND key.col1 < '27' PREHOOK: type: QUERY -POSTHOOK: query: -- 165,238 -explain +POSTHOOK: query: explain select * from hbase_ck_5 where key.col1 >= '165' AND key.col1 < '27' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -127,12 +121,10 @@ POSTHOOK: Input: default@hbase_ck_5 #### A masked pattern was here #### {"col1":"165","col2":"1165","col3":"2165"} val_165 {"col1":"238","col2":"1238","col3":"2238"} val_238 -PREHOOK: query: -- 238,311 -explain +PREHOOK: query: explain select * from hbase_ck_5 where key.col1 > '100' AND key.col2 >= '1238' PREHOOK: type: QUERY -POSTHOOK: query: -- 238,311 -explain +POSTHOOK: query: explain select * from hbase_ck_5 where key.col1 > '100' AND key.col2 >= '1238' POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out b/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out index b08a5c5..1f42567 100644 --- a/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out +++ b/hbase-handler/src/test/results/positive/hbase_handler_bulk.q.out @@ -1,52 +1,34 @@ -PREHOOK: query: -- -*- mode:sql -*- - -drop table if exists hb_target +PREHOOK: query: drop table if exists hb_target PREHOOK: type: DROPTABLE -POSTHOOK: query: -- -*- mode:sql -*- - -drop table if exists hb_target +POSTHOOK: query: drop table if exists hb_target POSTHOOK: type: DROPTABLE -PREHOOK: query: -- this is the target HBase table -create table hb_target(key int, val string) +PREHOOK: query: create table hb_target(key int, val string) stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' with serdeproperties ('hbase.columns.mapping' = ':key,cf:val') tblproperties 
('hbase.table.name' = 'positive_hbase_handler_bulk') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@hb_target -POSTHOOK: query: -- this is the target HBase table -create table hb_target(key int, val string) +POSTHOOK: query: create table hb_target(key int, val string) stored by 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' with serdeproperties ('hbase.columns.mapping' = ':key,cf:val') tblproperties ('hbase.table.name' = 'positive_hbase_handler_bulk') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@hb_target -#### A masked pattern was here #### -insert overwrite table hb_target select distinct key, value from src cluster by key +PREHOOK: query: insert overwrite table hb_target select distinct key, value from src cluster by key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@hb_target -#### A masked pattern was here #### -insert overwrite table hb_target select distinct key, value from src cluster by key +POSTHOOK: query: insert overwrite table hb_target select distinct key, value from src cluster by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@hb_target -PREHOOK: query: -- To get the files out to your local filesystem for loading into -#### A masked pattern was here #### --- semicolon-terminate the line below before running this test: -#### A masked pattern was here #### - -drop table hb_target +PREHOOK: query: drop table hb_target PREHOOK: type: DROPTABLE PREHOOK: Input: default@hb_target PREHOOK: Output: default@hb_target -POSTHOOK: query: -- To get the files out to your local filesystem for loading into -#### A masked pattern was here #### --- semicolon-terminate the line below before running this test: -#### A masked pattern was here #### - -drop table hb_target +POSTHOOK: query: drop table hb_target POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@hb_target POSTHOOK: Output: default@hb_target diff --git 
a/hbase-handler/src/test/results/positive/hbase_joins.q.out b/hbase-handler/src/test/results/positive/hbase_joins.q.out index 1c21f73..697675e 100644 --- a/hbase-handler/src/test/results/positive/hbase_joins.q.out +++ b/hbase-handler/src/test/results/positive/hbase_joins.q.out @@ -14,9 +14,7 @@ PREHOOK: query: DROP TABLE users_level PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE users_level POSTHOOK: type: DROPTABLE -PREHOOK: query: -- From HIVE-1257 - -CREATE TABLE users(key string, state string, country string, country_id int) +PREHOOK: query: CREATE TABLE users(key string, state string, country string, country_id int) STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' WITH SERDEPROPERTIES ( "hbase.columns.mapping" = "info:state,info:country,info:country_id" @@ -24,9 +22,7 @@ WITH SERDEPROPERTIES ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@users -POSTHOOK: query: -- From HIVE-1257 - -CREATE TABLE users(key string, state string, country string, country_id int) +POSTHOOK: query: CREATE TABLE users(key string, state string, country string, country_id int) STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' WITH SERDEPROPERTIES ( "hbase.columns.mapping" = "info:state,info:country,info:country_id" @@ -246,18 +242,14 @@ WITH SERDEPROPERTIES ("hbase.columns.mapping" = ":key,f:userid,f:level") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@users_level -PREHOOK: query: -- HIVE-1903: the problem fixed here showed up even without any data, --- so no need to load any to test it -SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num +PREHOOK: query: SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num FROM users JOIN users_level ON (users.userid = users_level.userid) GROUP BY year(from_unixtime(users.created)), level PREHOOK: type: QUERY PREHOOK: Input: default@users PREHOOK: Input: default@users_level 
#### A masked pattern was here #### -POSTHOOK: query: -- HIVE-1903: the problem fixed here showed up even without any data, --- so no need to load any to test it -SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num +POSTHOOK: query: SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num FROM users JOIN users_level ON (users.userid = users_level.userid) GROUP BY year(from_unixtime(users.created)), level POSTHOOK: type: QUERY diff --git a/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out b/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out index 83a3015..ee459e2 100644 --- a/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out +++ b/hbase-handler/src/test/results/positive/hbase_ppd_join.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: --create hive hbase table 1 -drop table if exists hive1_tbl_data_hbase1 +PREHOOK: query: drop table if exists hive1_tbl_data_hbase1 PREHOOK: type: DROPTABLE -POSTHOOK: query: --create hive hbase table 1 -drop table if exists hive1_tbl_data_hbase1 +POSTHOOK: query: drop table if exists hive1_tbl_data_hbase1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table if exists hive1_tbl_data_hbase2 PREHOOK: type: DROPTABLE @@ -30,8 +28,7 @@ WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_F POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@hive1_tbl_data_hbase1 -PREHOOK: query: --create hive view for the above hive table 1 -CREATE VIEW hive1_view_data_hbase1 +PREHOOK: query: CREATE VIEW hive1_view_data_hbase1 AS SELECT * FROM hive1_tbl_data_hbase1 @@ -42,8 +39,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@hive1_tbl_data_hbase1 PREHOOK: Output: database:default PREHOOK: Output: default@hive1_view_data_hbase1 -POSTHOOK: query: --create hive view for the above hive table 1 -CREATE VIEW hive1_view_data_hbase1 +POSTHOOK: query: CREATE VIEW hive1_view_data_hbase1 AS SELECT * FROM 
hive1_tbl_data_hbase1 @@ -54,34 +50,29 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@hive1_tbl_data_hbase1 POSTHOOK: Output: database:default POSTHOOK: Output: default@hive1_view_data_hbase1 -PREHOOK: query: --load data to hive table 1 -insert into table hive1_tbl_data_hbase1 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','4000-10000' from src where key = 100 +PREHOOK: query: insert into table hive1_tbl_data_hbase1 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','4000-10000' from src where key = 100 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@hive1_tbl_data_hbase1 -POSTHOOK: query: --load data to hive table 1 -insert into table hive1_tbl_data_hbase1 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','4000-10000' from src where key = 100 +POSTHOOK: query: insert into table hive1_tbl_data_hbase1 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','4000-10000' from src where key = 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@hive1_tbl_data_hbase1 -PREHOOK: query: --create hive hbase table 2 -CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) +PREHOOK: query: CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@hive1_tbl_data_hbase2 -POSTHOOK: query: --create hive hbase table 2 -CREATE TABLE hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) +POSTHOOK: query: CREATE TABLE 
hive1_tbl_data_hbase2 (COLUMID string,COLUMN_FN string,COLUMN_LN string,EMAIL string,COL_UPDATED_DATE timestamp, PK_COLUM string) STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' WITH SERDEPROPERTIES("hbase.columns.mapping" = "default:COLUMID,default:COLUMN_FN,default:COLUMN_LN,default:EMAIL,default:COL_UPDATED_DATE,:key" ) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@hive1_tbl_data_hbase2 -PREHOOK: query: --create hive view for the above hive hbase table 2 -CREATE VIEW hive1_view_data_hbase2 +PREHOOK: query: CREATE VIEW hive1_view_data_hbase2 AS SELECT * FROM hive1_tbl_data_hbase2 @@ -90,8 +81,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@hive1_tbl_data_hbase2 PREHOOK: Output: database:default PREHOOK: Output: default@hive1_view_data_hbase2 -POSTHOOK: query: --create hive view for the above hive hbase table 2 -CREATE VIEW hive1_view_data_hbase2 +POSTHOOK: query: CREATE VIEW hive1_view_data_hbase2 AS SELECT * FROM hive1_tbl_data_hbase2 @@ -100,19 +90,15 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@hive1_tbl_data_hbase2 POSTHOOK: Output: database:default POSTHOOK: Output: default@hive1_view_data_hbase2 -PREHOOK: query: --load data to hive hbase table 2 -insert into table hive1_tbl_data_hbase2 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','00001' from src where key = 100 +PREHOOK: query: insert into table hive1_tbl_data_hbase2 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','00001' from src where key = 100 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@hive1_tbl_data_hbase2 -POSTHOOK: query: --load data to hive hbase table 2 -insert into table hive1_tbl_data_hbase2 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','00001' from src where key = 100 +POSTHOOK: query: insert into table hive1_tbl_data_hbase2 select '00001','john','doe','john@hotmail.com','2014-01-01 12:01:02','00001' from src where key = 100 
POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@hive1_tbl_data_hbase2 -PREHOOK: query: -- do not return value without fix - -select x.FIRST_NAME1, x.EMAIL1 from ( +PREHOOK: query: select x.FIRST_NAME1, x.EMAIL1 from ( select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x PREHOOK: type: QUERY PREHOOK: Input: default@hive1_tbl_data_hbase1 @@ -120,9 +106,7 @@ PREHOOK: Input: default@hive1_tbl_data_hbase2 PREHOOK: Input: default@hive1_view_data_hbase1 PREHOOK: Input: default@hive1_view_data_hbase2 #### A masked pattern was here #### -POSTHOOK: query: -- do not return value without fix - -select x.FIRST_NAME1, x.EMAIL1 from ( +POSTHOOK: query: select x.FIRST_NAME1, x.EMAIL1 from ( select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x POSTHOOK: type: QUERY POSTHOOK: Input: default@hive1_tbl_data_hbase1 @@ -131,9 +115,7 @@ POSTHOOK: Input: default@hive1_view_data_hbase1 POSTHOOK: Input: default@hive1_view_data_hbase2 #### A masked pattern was here #### john john@hotmail.com -PREHOOK: query: -- return value with/without fix - -select x.FIRST_NAME1, x.EMAIL1 from ( +PREHOOK: query: select x.FIRST_NAME1, x.EMAIL1 from ( select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x PREHOOK: type: QUERY PREHOOK: Input: default@hive1_tbl_data_hbase1 @@ -141,9 +123,7 @@ PREHOOK: Input: default@hive1_tbl_data_hbase2 PREHOOK: Input: default@hive1_view_data_hbase1 PREHOOK: Input: default@hive1_view_data_hbase2 #### A masked pattern was here #### -POSTHOOK: query: -- return value with/without fix - -select x.FIRST_NAME1, x.EMAIL1 from ( +POSTHOOK: query: select x.FIRST_NAME1, x.EMAIL1 from ( select p.COLUMN_FN as first_name1, a.EMAIL as email1 from hive1_view_data_hbase2 p inner join 
hive1_view_data_hbase1 a on p.COLUMID =a.COLUMID) x POSTHOOK: type: QUERY POSTHOOK: Input: default@hive1_tbl_data_hbase1 diff --git a/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out b/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out index 332c5e6..0d72bdc 100644 --- a/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out +++ b/hbase-handler/src/test/results/positive/hbase_ppd_key_range.q.out @@ -22,11 +22,9 @@ FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@hbase_pushdown -PREHOOK: query: -- with full pushdown -explain select * from hbase_pushdown where key>'90' +PREHOOK: query: explain select * from hbase_pushdown where key>'90' PREHOOK: type: QUERY -POSTHOOK: query: -- with full pushdown -explain select * from hbase_pushdown where key>'90' +POSTHOOK: query: explain select * from hbase_pushdown where key>'90' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -163,11 +161,9 @@ POSTHOOK: Input: default@hbase_pushdown 96 val_96 97 val_97 98 val_98 -PREHOOK: query: -- with cnostant expressinon -explain select * from hbase_pushdown where key>=cast(40 + 50 as string) +PREHOOK: query: explain select * from hbase_pushdown where key>=cast(40 + 50 as string) PREHOOK: type: QUERY -POSTHOOK: query: -- with cnostant expressinon -explain select * from hbase_pushdown where key>=cast(40 + 50 as string) +POSTHOOK: query: explain select * from hbase_pushdown where key>=cast(40 + 50 as string) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -201,13 +197,9 @@ POSTHOOK: Input: default@hbase_pushdown 96 val_96 97 val_97 98 val_98 -PREHOOK: query: -- with partial pushdown - -explain select * from hbase_pushdown where key>'90' and value like '%9%' +PREHOOK: query: explain select * from hbase_pushdown where key>'90' and value like '%9%' PREHOOK: type: QUERY -POSTHOOK: query: -- with partial pushdown - -explain select * from hbase_pushdown where key>'90' and value 
like '%9%' +POSTHOOK: query: explain select * from hbase_pushdown where key>'90' and value like '%9%' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -255,14 +247,10 @@ POSTHOOK: Input: default@hbase_pushdown 96 val_96 97 val_97 98 val_98 -PREHOOK: query: -- with two residuals - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown where key>='90' and value like '%9%' and key=cast(value as int) PREHOOK: type: QUERY -POSTHOOK: query: -- with two residuals - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown where key>='90' and value like '%9%' and key=cast(value as int) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -308,14 +296,10 @@ where key>='90' and value like '%9%' and key=cast(value as int) POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_pushdown #### A masked pattern was here #### -PREHOOK: query: -- with contradictory pushdowns - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown where key<'80' and key>'90' and value like '%90%' PREHOOK: type: QUERY -POSTHOOK: query: -- with contradictory pushdowns - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown where key<'80' and key>'90' and value like '%90%' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -361,13 +345,9 @@ where key<'80' and key>'90' and value like '%90%' POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_pushdown #### A masked pattern was here #### -PREHOOK: query: -- with nothing to push down - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown PREHOOK: type: QUERY -POSTHOOK: query: -- with nothing to push down - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -386,16 +366,10 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: 
NONE ListSink -PREHOOK: query: -- with a predicate which is not actually part of the filter, so --- it should be ignored by pushdown - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown where (case when key<'90' then 2 else 4 end) > 3 PREHOOK: type: QUERY -POSTHOOK: query: -- with a predicate which is not actually part of the filter, so --- it should be ignored by pushdown - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown where (case when key<'90' then 2 else 4 end) > 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -430,16 +404,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- with a predicate which is under an OR, so it should --- be ignored by pushdown - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown where key<='80' or value like '%90%' PREHOOK: type: QUERY -POSTHOOK: query: -- with a predicate which is under an OR, so it should --- be ignored by pushdown - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown where key<='80' or value like '%90%' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -474,12 +442,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- following will get pushed into hbase after HIVE-2819 -explain select * from hbase_pushdown where key > '281' +PREHOOK: query: explain select * from hbase_pushdown where key > '281' and key < '287' PREHOOK: type: QUERY -POSTHOOK: query: -- following will get pushed into hbase after HIVE-2819 -explain select * from hbase_pushdown where key > '281' +POSTHOOK: query: explain select * from hbase_pushdown where key > '281' and key < '287' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -515,13 +481,9 @@ POSTHOOK: Input: default@hbase_pushdown 284 val_284 285 val_285 286 val_286 -PREHOOK: query: -- with pushdown disabled - -explain select * from hbase_pushdown where key<='90' +PREHOOK: query: explain select * from hbase_pushdown 
where key<='90' PREHOOK: type: QUERY -POSTHOOK: query: -- with pushdown disabled - -explain select * from hbase_pushdown where key<='90' +POSTHOOK: query: explain select * from hbase_pushdown where key<='90' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/hbase-handler/src/test/results/positive/hbase_pushdown.q.out b/hbase-handler/src/test/results/positive/hbase_pushdown.q.out index a5d8c6f..5fac123 100644 --- a/hbase-handler/src/test/results/positive/hbase_pushdown.q.out +++ b/hbase-handler/src/test/results/positive/hbase_pushdown.q.out @@ -22,11 +22,9 @@ FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@hbase_pushdown -PREHOOK: query: -- with full pushdown -explain select * from hbase_pushdown where key=90 +PREHOOK: query: explain select * from hbase_pushdown where key=90 PREHOOK: type: QUERY -POSTHOOK: query: -- with full pushdown -explain select * from hbase_pushdown where key=90 +POSTHOOK: query: explain select * from hbase_pushdown where key=90 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -55,13 +53,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_pushdown #### A masked pattern was here #### 90 val_90 -PREHOOK: query: -- with partial pushdown - -explain select * from hbase_pushdown where key=90 and value like '%90%' +PREHOOK: query: explain select * from hbase_pushdown where key=90 and value like '%90%' PREHOOK: type: QUERY -POSTHOOK: query: -- with partial pushdown - -explain select * from hbase_pushdown where key=90 and value like '%90%' +POSTHOOK: query: explain select * from hbase_pushdown where key=90 and value like '%90%' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -105,11 +99,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_pushdown #### A masked pattern was here #### 90 val_90 -PREHOOK: query: -- with partial pushdown with optimization (HIVE-6650) -explain select * from hbase_pushdown where key=90 and value like '%90%' 
+PREHOOK: query: explain select * from hbase_pushdown where key=90 and value like '%90%' PREHOOK: type: QUERY -POSTHOOK: query: -- with partial pushdown with optimization (HIVE-6650) -explain select * from hbase_pushdown where key=90 and value like '%90%' +POSTHOOK: query: explain select * from hbase_pushdown where key=90 and value like '%90%' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -153,14 +145,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_pushdown #### A masked pattern was here #### 90 val_90 -PREHOOK: query: -- with two residuals - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown where key=90 and value like '%90%' and key=cast(value as int) PREHOOK: type: QUERY -POSTHOOK: query: -- with two residuals - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown where key=90 and value like '%90%' and key=cast(value as int) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -196,14 +184,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- with contradictory pushdowns - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown where key=80 and key=90 and value like '%90%' PREHOOK: type: QUERY -POSTHOOK: query: -- with contradictory pushdowns - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown where key=80 and key=90 and value like '%90%' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -248,13 +232,9 @@ where key=80 and key=90 and value like '%90%' POSTHOOK: type: QUERY POSTHOOK: Input: default@hbase_pushdown #### A masked pattern was here #### -PREHOOK: query: -- with nothing to push down - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown PREHOOK: type: QUERY -POSTHOOK: query: -- with nothing to push down - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown POSTHOOK: type: QUERY STAGE 
DEPENDENCIES: Stage-0 is a root stage @@ -273,16 +253,10 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- with a predicate which is not actually part of the filter, so --- it should be ignored by pushdown - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown where (case when key=90 then 2 else 4 end) > 3 PREHOOK: type: QUERY -POSTHOOK: query: -- with a predicate which is not actually part of the filter, so --- it should be ignored by pushdown - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown where (case when key=90 then 2 else 4 end) > 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -317,16 +291,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- with a predicate which is under an OR, so it should --- be ignored by pushdown - -explain select * from hbase_pushdown +PREHOOK: query: explain select * from hbase_pushdown where key=80 or value like '%90%' PREHOOK: type: QUERY -POSTHOOK: query: -- with a predicate which is under an OR, so it should --- be ignored by pushdown - -explain select * from hbase_pushdown +POSTHOOK: query: explain select * from hbase_pushdown where key=80 or value like '%90%' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -361,13 +329,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- with pushdown disabled - -explain select * from hbase_pushdown where key=90 +PREHOOK: query: explain select * from hbase_pushdown where key=90 PREHOOK: type: QUERY -POSTHOOK: query: -- with pushdown disabled - -explain select * from hbase_pushdown where key=90 +POSTHOOK: query: explain select * from hbase_pushdown where key=90 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out index e487a0b..a552350 
100644 --- a/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out +++ b/hbase-handler/src/test/results/positive/hbase_single_sourced_multi_insert.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- HIVE-4375 Single sourced multi insert consists of native and non-native table mixed throws NPE -CREATE TABLE src_x1(key string, value string) +PREHOOK: query: CREATE TABLE src_x1(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_x1 -POSTHOOK: query: -- HIVE-4375 Single sourced multi insert consists of native and non-native table mixed throws NPE -CREATE TABLE src_x1(key string, value string) +POSTHOOK: query: CREATE TABLE src_x1(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_x1 diff --git a/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out b/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out index 0428e41..a2a2f56 100644 --- a/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out +++ b/hbase-handler/src/test/results/positive/hbase_timestamp_format.q.out @@ -40,16 +40,14 @@ POSTHOOK: Input: default@hbase_str 238 2001-02-03-04.05.06.123456 val_238 311 2001-02-03-04.05.06.123456 val_311 86 2001-02-03-04.05.06.123456 val_86 -PREHOOK: query: -- Timestamp string does not match the default timestamp format, specify a custom timestamp format -create external table hbase_ts(rowkey string,mytime timestamp,mystr string) +PREHOOK: query: create external table hbase_ts(rowkey string,mytime timestamp,mystr string) STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' WITH SERDEPROPERTIES ('hbase.columns.mapping' = 'm:mytime,m:mystr', 'timestamp.formats' = 'yyyy-MM-dd-HH.mm.ss.SSSSSS') TBLPROPERTIES ('hbase.table.name' = 'hbase_ts') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@hbase_ts -POSTHOOK: query: -- Timestamp string does not 
match the default timestamp format, specify a custom timestamp format -create external table hbase_ts(rowkey string,mytime timestamp,mystr string) +POSTHOOK: query: create external table hbase_ts(rowkey string,mytime timestamp,mystr string) STORED BY 'org.apache.hadoop.hive.hbase.HBaseStorageHandler' WITH SERDEPROPERTIES ('hbase.columns.mapping' = 'm:mytime,m:mystr', 'timestamp.formats' = 'yyyy-MM-dd-HH.mm.ss.SSSSSS') TBLPROPERTIES ('hbase.table.name' = 'hbase_ts') diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index 8b383c7..85f8fc6 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -64,6 +64,7 @@ import java.util.regex.Pattern; import com.google.common.base.Preconditions; + import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; @@ -1347,7 +1348,7 @@ public int executeClient(String tname) { } private int executeClientInternal(String commands) { - String [] cmds = commands.split(";"); + List cmds = CliDriver.splitSemiColon(commands); int rc = 0; String command = ""; @@ -1470,9 +1471,9 @@ private String getCommand(String tname) { StringBuilder newCommands = new StringBuilder(commands.length()); int lastMatchEnd = 0; Matcher commentMatcher = Pattern.compile("^--.*$", Pattern.MULTILINE).matcher(commands); + // remove the comments while (commentMatcher.find()) { newCommands.append(commands.substring(lastMatchEnd, commentMatcher.start())); - newCommands.append(commentMatcher.group().replaceAll("(? 
role1 -> role2 -> role3 ----------- - -create role role1 +PREHOOK: query: create role role1 PREHOOK: type: CREATEROLE -POSTHOOK: query: ---------- --- create the following user, role mapping --- user1 -> role1 -> role2 -> role3 ----------- - -create role role1 +POSTHOOK: query: create role role1 POSTHOOK: type: CREATEROLE PREHOOK: query: grant role1 to user user1 PREHOOK: type: GRANT_ROLE @@ -151,13 +141,9 @@ PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES -PREHOOK: query: -- Revoke role3 from hierarchy one at a time and check permissions --- after revoking from both, select should fail -revoke role3 from role role2 +PREHOOK: query: revoke role3 from role role2 PREHOOK: type: REVOKE_ROLE -POSTHOOK: query: -- Revoke role3 from hierarchy one at a time and check permissions --- after revoking from both, select should fail -revoke role3 from role role2 +POSTHOOK: query: revoke role3 from role role2 POSTHOOK: type: REVOKE_ROLE PREHOOK: query: show current roles PREHOOK: type: SHOW_ROLES diff --git a/ql/src/test/results/clientnegative/authorization_sba_drop_table.q.out b/ql/src/test/results/clientnegative/authorization_sba_drop_table.q.out index 39cf42e..f28a896 100644 --- a/ql/src/test/results/clientnegative/authorization_sba_drop_table.q.out +++ b/ql/src/test/results/clientnegative/authorization_sba_drop_table.q.out @@ -8,8 +8,7 @@ POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 -PREHOOK: query: -- Attempt to drop table without having write permissions on table dir should result in error -drop table t1 +PREHOOK: query: drop table t1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientnegative/authorization_select.q.out b/ql/src/test/results/clientnegative/authorization_select.q.out index 69cf766..3b5d09f 100644 --- 
a/ql/src/test/results/clientnegative/authorization_select.q.out +++ b/ql/src/test/results/clientnegative/authorization_select.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- check query without select privilege fails -create table t1(i int) +PREHOOK: query: create table t1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- check query without select privilege fails -create table t1(i int) +POSTHOOK: query: create table t1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientnegative/authorization_select_view.q.out b/ql/src/test/results/clientnegative/authorization_select_view.q.out index a2a6b66..707757e 100644 --- a/ql/src/test/results/clientnegative/authorization_select_view.q.out +++ b/ql/src/test/results/clientnegative/authorization_select_view.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- check create view without select privileges -create table t1(i int) +PREHOOK: query: create table t1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- check create view without select privileges -create table t1(i int) +POSTHOOK: query: create table t1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientnegative/authorization_set_invalidconf.q.out b/ql/src/test/results/clientnegative/authorization_set_invalidconf.q.out index 2a58c72..dd6092b 100644 --- a/ql/src/test/results/clientnegative/authorization_set_invalidconf.q.out +++ b/ql/src/test/results/clientnegative/authorization_set_invalidconf.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- run a sql query to initialize authorization, then try setting a allowed config and then a disallowed config param -use default +PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default -POSTHOOK: query: -- run a sql query 
to initialize authorization, then try setting a allowed config and then a disallowed config param -use default +POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default Query returned non-zero code: 1, cause: Cannot modify hive.security.authorization.enabled at runtime. It is not in list of params that are allowed to be modified at runtime diff --git a/ql/src/test/results/clientnegative/authorization_set_nonexistent_conf.q.out b/ql/src/test/results/clientnegative/authorization_set_nonexistent_conf.q.out index 8b2cf7e..b46d658 100644 --- a/ql/src/test/results/clientnegative/authorization_set_nonexistent_conf.q.out +++ b/ql/src/test/results/clientnegative/authorization_set_nonexistent_conf.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- run a sql query to initialize authorization, then try setting a non-existent config param -use default +PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default -POSTHOOK: query: -- run a sql query to initialize authorization, then try setting a non-existent config param -use default +POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default Query returned non-zero code: 1, cause: hive configuration hive.exec.reduce.max does not exists. diff --git a/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out b/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out index b1c647d..ee056be 100644 --- a/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out +++ b/ql/src/test/results/clientnegative/authorization_set_role_neg1.q.out @@ -1,5 +1,3 @@ -PREHOOK: query: -- an error should be thrown if 'set role ' is done for role that does not exist - -set role nosuchroleexists +PREHOOK: query: set role nosuchroleexists PREHOOK: type: SHOW_ROLES FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. 
hive_test_user doesn't belong to role nosuchroleexists diff --git a/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out b/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out index ee03c0e..539ce39 100644 --- a/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out +++ b/ql/src/test/results/clientnegative/authorization_set_role_neg2.q.out @@ -2,13 +2,9 @@ PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES -PREHOOK: query: -- an error should be thrown if 'set role ' is done for role that does not exist - -create role rset_role_neg +PREHOOK: query: create role rset_role_neg PREHOOK: type: CREATEROLE -POSTHOOK: query: -- an error should be thrown if 'set role ' is done for role that does not exist - -create role rset_role_neg +POSTHOOK: query: create role rset_role_neg POSTHOOK: type: CREATEROLE PREHOOK: query: grant role rset_role_neg to user user2 PREHOOK: type: GRANT_ROLE diff --git a/ql/src/test/results/clientnegative/authorization_show_columns.q.out b/ql/src/test/results/clientnegative/authorization_show_columns.q.out index 7dca55e..dcabfc7 100644 --- a/ql/src/test/results/clientnegative/authorization_show_columns.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_columns.q.out @@ -10,13 +10,11 @@ PREHOOK: Input: database:db1 POSTHOOK: query: use db1 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:db1 -PREHOOK: query: -- check query without select privilege fails -create table t1(i int) +PREHOOK: query: create table t1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 PREHOOK: Output: db1@t1 -POSTHOOK: query: -- check query without select privilege fails -create table t1(i int) +POSTHOOK: query: create table t1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@t1 diff --git a/ql/src/test/results/clientnegative/authorization_show_parts_nosel.q.out 
b/ql/src/test/results/clientnegative/authorization_show_parts_nosel.q.out index 25d0c2e..8fbb7ed 100644 --- a/ql/src/test/results/clientnegative/authorization_show_parts_nosel.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_parts_nosel.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- check if alter table fails as different user -create table t_show_parts(i int) partitioned by (j string) +PREHOOK: query: create table t_show_parts(i int) partitioned by (j string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_show_parts -POSTHOOK: query: -- check if alter table fails as different user -create table t_show_parts(i int) partitioned by (j string) +POSTHOOK: query: create table t_show_parts(i int) partitioned by (j string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_show_parts diff --git a/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out index 659edcc..8be27b5 100644 --- a/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_role_principals_no_admin.q.out @@ -1,4 +1,3 @@ -PREHOOK: query: -- This test will fail because hive_test_user is not in admin role -show principals role1 +PREHOOK: query: show principals role1 PREHOOK: type: SHOW_ROLE_PRINCIPALS FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current user : hive_test_user is not allowed get principals in a role. User has to belong to ADMIN role and have it as current role, for this action. Otherwise, grantor need to have ADMIN OPTION on role being granted and have it as a current role for this action. 
diff --git a/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out b/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out index efff4bc..adc2788 100644 --- a/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out +++ b/ql/src/test/results/clientnegative/authorization_show_roles_no_admin.q.out @@ -1,4 +1,3 @@ -PREHOOK: query: -- This test will fail because hive_test_user is not in admin role -show roles +PREHOOK: query: show roles PREHOOK: type: SHOW_ROLES FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. Current user : hive_test_user is not allowed to list roles. User has to belong to ADMIN role and have it as current role, for this action. diff --git a/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out b/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out index 464c433..2fcc56e 100644 --- a/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out +++ b/ql/src/test/results/clientnegative/authorization_table_grant_nosuchrole.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: ---------------------------------------- --- granting object privilege to a role that does not exist should fail ----------------------------------------- -create table t1(i int) +PREHOOK: query: create table t1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: ---------------------------------------- --- granting object privilege to a role that does not exist should fail ----------------------------------------- -create table t1(i int) +POSTHOOK: query: create table t1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientnegative/authorization_truncate.q.out b/ql/src/test/results/clientnegative/authorization_truncate.q.out index 1c675fe..c333731 100644 --- 
a/ql/src/test/results/clientnegative/authorization_truncate.q.out +++ b/ql/src/test/results/clientnegative/authorization_truncate.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- check add partition without insert privilege -create table t1(i int, j int) +PREHOOK: query: create table t1(i int, j int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- check add partition without insert privilege -create table t1(i int, j int) +POSTHOOK: query: create table t1(i int, j int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientnegative/authorization_truncate_2.q.out b/ql/src/test/results/clientnegative/authorization_truncate_2.q.out index 04246f4..004eac1 100644 --- a/ql/src/test/results/clientnegative/authorization_truncate_2.q.out +++ b/ql/src/test/results/clientnegative/authorization_truncate_2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- check truncate on partition -create table auth_trunc2(i int) partitioned by (j int) +PREHOOK: query: create table auth_trunc2(i int) partitioned by (j int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@auth_trunc2 -POSTHOOK: query: -- check truncate on partition -create table auth_trunc2(i int) partitioned by (j int) +POSTHOOK: query: create table auth_trunc2(i int) partitioned by (j int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@auth_trunc2 diff --git a/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out b/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out index e8ff975..ad6d551 100644 --- a/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out +++ b/ql/src/test/results/clientnegative/authorization_update_noupdatepriv.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- check update without update priv -create table auth_noupd(i int, j int) clustered by (j) 
into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table auth_noupd(i int, j int) clustered by (j) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@auth_noupd -POSTHOOK: query: -- check update without update priv -create table auth_noupd(i int, j int) clustered by (j) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: create table auth_noupd(i int, j int) clustered by (j) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@auth_noupd diff --git a/ql/src/test/results/clientnegative/authorization_view_1.q.out b/ql/src/test/results/clientnegative/authorization_view_1.q.out index 72b134f..2a36dc6 100644 --- a/ql/src/test/results/clientnegative/authorization_view_1.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_1.q.out @@ -20,14 +20,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v -PREHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test Authorization failed:No privilege 'Select' found for inputs { database:default, table:v, columnName:key}. Use SHOW GRANT to get more details. 
diff --git a/ql/src/test/results/clientnegative/authorization_view_2.q.out b/ql/src/test/results/clientnegative/authorization_view_2.q.out index e905299..099f42d 100644 --- a/ql/src/test/results/clientnegative/authorization_view_2.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_2.q.out @@ -20,14 +20,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v -PREHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant select(key) on table v to user hive_test_user diff --git a/ql/src/test/results/clientnegative/authorization_view_3.q.out b/ql/src/test/results/clientnegative/authorization_view_3.q.out index 3e8f98d..a43f902 100644 --- a/ql/src/test/results/clientnegative/authorization_view_3.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_3.q.out @@ -20,14 +20,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v -PREHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: 
Output: default@src_autho_test PREHOOK: query: grant select(key) on v to user hive_test_user diff --git a/ql/src/test/results/clientnegative/authorization_view_4.q.out b/ql/src/test/results/clientnegative/authorization_view_4.q.out index dbcc769..3214508 100644 --- a/ql/src/test/results/clientnegative/authorization_view_4.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_4.q.out @@ -40,14 +40,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +PREHOOK: query: grant select on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant select on table v to user hive_test_user diff --git a/ql/src/test/results/clientnegative/authorization_view_6.q.out b/ql/src/test/results/clientnegative/authorization_view_6.q.out index 6584497..d3e7378 100644 --- a/ql/src/test/results/clientnegative/authorization_view_6.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_6.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src_autho_test POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: 
default@v2 Authorization failed:No privilege 'Select' found for inputs { database:default, table:src_autho_test, columnName:key}. Use SHOW GRANT to get more details. diff --git a/ql/src/test/results/clientnegative/authorization_view_7.q.out b/ql/src/test/results/clientnegative/authorization_view_7.q.out index e7c93f7..98186f0 100644 --- a/ql/src/test/results/clientnegative/authorization_view_7.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_7.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 Authorization failed:No privilege 'Select' found for inputs { database:default, table:src_autho_test, columnName:key}. Use SHOW GRANT to get more details. 
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out index 72b134f..2a36dc6 100644 --- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_1.q.out @@ -20,14 +20,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v -PREHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test Authorization failed:No privilege 'Select' found for inputs { database:default, table:v, columnName:key}. Use SHOW GRANT to get more details. 
diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out index e905299..099f42d 100644 --- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_2.q.out @@ -20,14 +20,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v -PREHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant select(key) on table v to user hive_test_user diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out index 3e8f98d..a43f902 100644 --- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_3.q.out @@ -20,14 +20,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v -PREHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: query: grant 
select(key) on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant select(key) on v to user hive_test_user diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out index dbcc769..3214508 100644 --- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_4.q.out @@ -40,14 +40,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +PREHOOK: query: grant select on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant select on table v to user hive_test_user diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_6.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_6.q.out index 6584497..d3e7378 100644 --- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_6.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_6.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src_autho_test POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: 
query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 Authorization failed:No privilege 'Select' found for inputs { database:default, table:src_autho_test, columnName:key}. Use SHOW GRANT to get more details. diff --git a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_7.q.out b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_7.q.out index e7c93f7..98186f0 100644 --- a/ql/src/test/results/clientnegative/authorization_view_disable_cbo_7.q.out +++ b/ql/src/test/results/clientnegative/authorization_view_disable_cbo_7.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 Authorization failed:No privilege 'Select' found for inputs { database:default, table:src_autho_test, columnName:key}. Use SHOW GRANT to get more details. 
diff --git a/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out b/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out index 6c087b8..c933081 100644 --- a/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out +++ b/ql/src/test/results/clientnegative/avro_non_nullable_union.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- verify that Hive fails to read a union type column from avro file with null union data if AVRO schema definition is not nullable - -DROP TABLE IF EXISTS union_nullable_test_text +PREHOOK: query: DROP TABLE IF EXISTS union_nullable_test_text PREHOOK: type: DROPTABLE -POSTHOOK: query: -- verify that Hive fails to read a union type column from avro file with null union data if AVRO schema definition is not nullable - -DROP TABLE IF EXISTS union_nullable_test_text +POSTHOOK: query: DROP TABLE IF EXISTS union_nullable_test_text POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE union_nullable_test_text (id int, value uniontype) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ':' STORED AS textfile PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out b/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out index 44fd262..79f1f93 100644 --- a/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out +++ b/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out @@ -74,22 +74,12 @@ POSTHOOK: query: load data local inpath '../../data/files/srcbucket23.txt' POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@srcbucket_mapjoin_part_2@ds=2008-04-08 -PREHOOK: query: -- The number of buckets in the 2 tables above (being joined later) dont match. --- Throw an error if the user requested a bucketed mapjoin to be enforced. 
--- In the default case (hive.enforce.bucketmapjoin=false), the query succeeds --- even though mapjoin is not being performed - -explain +PREHOOK: query: explain select a.key, a.value, b.value from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b on a.key=b.key and a.ds="2008-04-08" and b.ds="2008-04-08" PREHOOK: type: QUERY -POSTHOOK: query: -- The number of buckets in the 2 tables above (being joined later) dont match. --- Throw an error if the user requested a bucketed mapjoin to be enforced. --- In the default case (hive.enforce.bucketmapjoin=false), the query succeeds --- even though mapjoin is not being performed - -explain +POSTHOOK: query: explain select a.key, a.value, b.value from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b on a.key=b.key and a.ds="2008-04-08" and b.ds="2008-04-08" diff --git a/ql/src/test/results/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q.out b/ql/src/test/results/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q.out index f4522d2..5e8630e 100644 --- a/ql/src/test/results/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q.out +++ b/ql/src/test/results/clientnegative/bucket_mapjoin_wrong_table_metadata_1.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Although the user has specified a bucketed map-join, the number of buckets in the table --- do not match the number of files -drop table table1 +PREHOOK: query: drop table table1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Although the user has specified a bucketed map-join, the number of buckets in the table --- do not match the number of files -drop table table1 +POSTHOOK: query: drop table table1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table table2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q.out b/ql/src/test/results/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q.out index 9aa9b5d..668b384 100644 --- 
a/ql/src/test/results/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q.out +++ b/ql/src/test/results/clientnegative/bucket_mapjoin_wrong_table_metadata_2.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Although the user has specified a bucketed map-join, the number of buckets in the table --- do not match the number of files -drop table table1 +PREHOOK: query: drop table table1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Although the user has specified a bucketed map-join, the number of buckets in the table --- do not match the number of files -drop table table1 +POSTHOOK: query: drop table table1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table table2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientnegative/compute_stats_long.q.out b/ql/src/test/results/clientnegative/compute_stats_long.q.out index 298e5e4..79f2146 100644 --- a/ql/src/test/results/clientnegative/compute_stats_long.q.out +++ b/ql/src/test/results/clientnegative/compute_stats_long.q.out @@ -6,18 +6,15 @@ POSTHOOK: query: create table tab_int(a int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_int -PREHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tab_int -POSTHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tab_int -PREHOOK: query: -- compute stats should raise an error since the number of bit vectors > 1024 -select compute_stats(a, 10000) from tab_int +PREHOOK: query: select compute_stats(a, 10000) from tab_int PREHOOK: type: QUERY PREHOOK: Input: default@tab_int #### A masked pattern was 
here #### diff --git a/ql/src/test/results/clientnegative/create_or_replace_view1.q.out b/ql/src/test/results/clientnegative/create_or_replace_view1.q.out index a96064b..9ec57e3 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view1.q.out +++ b/ql/src/test/results/clientnegative/create_or_replace_view1.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Cannot add or drop partition columns with CREATE OR REPLACE VIEW if partitions currently exist (must specify partition columns) - -drop view v +PREHOOK: query: drop view v PREHOOK: type: DROPVIEW -POSTHOOK: query: -- Cannot add or drop partition columns with CREATE OR REPLACE VIEW if partitions currently exist (must specify partition columns) - -drop view v +POSTHOOK: query: drop view v POSTHOOK: type: DROPVIEW PREHOOK: query: create view v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientnegative/create_or_replace_view2.q.out b/ql/src/test/results/clientnegative/create_or_replace_view2.q.out index 8233a4e..9ec57e3 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view2.q.out +++ b/ql/src/test/results/clientnegative/create_or_replace_view2.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Cannot add or drop partition columns with CREATE OR REPLACE VIEW if partitions currently exist - -drop view v +PREHOOK: query: drop view v PREHOOK: type: DROPVIEW -POSTHOOK: query: -- Cannot add or drop partition columns with CREATE OR REPLACE VIEW if partitions currently exist - -drop view v +POSTHOOK: query: drop view v POSTHOOK: type: DROPVIEW PREHOOK: query: create view v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientnegative/create_or_replace_view4.q.out b/ql/src/test/results/clientnegative/create_or_replace_view4.q.out index 289a664..71a3d80 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view4.q.out +++ 
b/ql/src/test/results/clientnegative/create_or_replace_view4.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- View must have at least one non-partition column. - -drop view v +PREHOOK: query: drop view v PREHOOK: type: DROPVIEW -POSTHOOK: query: -- View must have at least one non-partition column. - -drop view v +POSTHOOK: query: drop view v POSTHOOK: type: DROPVIEW PREHOOK: query: create view v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientnegative/create_or_replace_view5.q.out b/ql/src/test/results/clientnegative/create_or_replace_view5.q.out index 68213f4..669a228 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view5.q.out +++ b/ql/src/test/results/clientnegative/create_or_replace_view5.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Can't combine IF NOT EXISTS and OR REPLACE. - -drop view v +PREHOOK: query: drop view v PREHOOK: type: DROPVIEW -POSTHOOK: query: -- Can't combine IF NOT EXISTS and OR REPLACE. - -drop view v +POSTHOOK: query: drop view v POSTHOOK: type: DROPVIEW PREHOOK: query: create view v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientnegative/create_or_replace_view6.q.out b/ql/src/test/results/clientnegative/create_or_replace_view6.q.out index 6f571dd..a5702ab 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view6.q.out +++ b/ql/src/test/results/clientnegative/create_or_replace_view6.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Can't update view to have an invalid definition - -drop view v +PREHOOK: query: drop view v PREHOOK: type: DROPVIEW -POSTHOOK: query: -- Can't update view to have an invalid definition - -drop view v +POSTHOOK: query: drop view v POSTHOOK: type: DROPVIEW PREHOOK: query: create view v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientnegative/create_or_replace_view7.q.out 
b/ql/src/test/results/clientnegative/create_or_replace_view7.q.out index ccc1f66..2d73a1f 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view7.q.out +++ b/ql/src/test/results/clientnegative/create_or_replace_view7.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Can't update view to have a view cycle (1) - -drop view v +PREHOOK: query: drop view v PREHOOK: type: DROPVIEW -POSTHOOK: query: -- Can't update view to have a view cycle (1) - -drop view v +POSTHOOK: query: drop view v POSTHOOK: type: DROPVIEW PREHOOK: query: create view v1 partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientnegative/create_or_replace_view8.q.out b/ql/src/test/results/clientnegative/create_or_replace_view8.q.out index 4a65a3a..2d8d5b0 100644 --- a/ql/src/test/results/clientnegative/create_or_replace_view8.q.out +++ b/ql/src/test/results/clientnegative/create_or_replace_view8.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Can't update view to have a view cycle (2) - -drop view v +PREHOOK: query: drop view v PREHOOK: type: DROPVIEW -POSTHOOK: query: -- Can't update view to have a view cycle (2) - -drop view v +POSTHOOK: query: drop view v POSTHOOK: type: DROPVIEW PREHOOK: query: create view v1 partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientnegative/create_view_failure1.q.out b/ql/src/test/results/clientnegative/create_view_failure1.q.out index 4be2886..bf149fc 100644 --- a/ql/src/test/results/clientnegative/create_view_failure1.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure1.q.out @@ -2,13 +2,11 @@ PREHOOK: query: DROP VIEW xxx12 PREHOOK: type: DROPVIEW POSTHOOK: query: DROP VIEW xxx12 POSTHOOK: type: DROPVIEW -PREHOOK: query: -- views and tables share the same namespace -CREATE TABLE xxx12(key int) +PREHOOK: query: CREATE TABLE xxx12(key int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@xxx12 
-POSTHOOK: query: -- views and tables share the same namespace -CREATE TABLE xxx12(key int) +POSTHOOK: query: CREATE TABLE xxx12(key int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx12 diff --git a/ql/src/test/results/clientnegative/create_view_failure10.q.out b/ql/src/test/results/clientnegative/create_view_failure10.q.out index 2823d9f..62557cb 100644 --- a/ql/src/test/results/clientnegative/create_view_failure10.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure10.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- CREATE VIEW should fail if it references a temp table -create temporary table tmp1 (c1 string, c2 string) +PREHOOK: query: create temporary table tmp1 (c1 string, c2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp1 -POSTHOOK: query: -- CREATE VIEW should fail if it references a temp table -create temporary table tmp1 (c1 string, c2 string) +POSTHOOK: query: create temporary table tmp1 (c1 string, c2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp1 diff --git a/ql/src/test/results/clientnegative/create_view_failure2.q.out b/ql/src/test/results/clientnegative/create_view_failure2.q.out index aadceeb..58da497 100644 --- a/ql/src/test/results/clientnegative/create_view_failure2.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure2.q.out @@ -2,14 +2,12 @@ PREHOOK: query: DROP VIEW xxx4 PREHOOK: type: DROPVIEW POSTHOOK: query: DROP VIEW xxx4 POSTHOOK: type: DROPVIEW -PREHOOK: query: -- views and tables share the same namespace -CREATE VIEW xxx4 AS SELECT key FROM src +PREHOOK: query: CREATE VIEW xxx4 AS SELECT key FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@xxx4 -POSTHOOK: query: -- views and tables share the same namespace -CREATE VIEW xxx4 AS SELECT key FROM src +POSTHOOK: query: CREATE VIEW xxx4 AS SELECT key 
FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientnegative/create_view_failure4.q.out b/ql/src/test/results/clientnegative/create_view_failure4.q.out index 8e47735..300715d 100644 --- a/ql/src/test/results/clientnegative/create_view_failure4.q.out +++ b/ql/src/test/results/clientnegative/create_view_failure4.q.out @@ -2,8 +2,7 @@ PREHOOK: query: DROP VIEW xxx5 PREHOOK: type: DROPVIEW POSTHOOK: query: DROP VIEW xxx5 POSTHOOK: type: DROPVIEW -PREHOOK: query: -- duplicate column names are illegal -CREATE VIEW xxx5(x,x) AS +PREHOOK: query: CREATE VIEW xxx5(x,x) AS SELECT key,value FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientnegative/database_create_already_exists.q.out b/ql/src/test/results/clientnegative/database_create_already_exists.q.out index 74ca7c4..16f45ba 100644 --- a/ql/src/test/results/clientnegative/database_create_already_exists.q.out +++ b/ql/src/test/results/clientnegative/database_create_already_exists.q.out @@ -3,12 +3,10 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- Try to create a database that already exists -CREATE DATABASE test_db +PREHOOK: query: CREATE DATABASE test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -POSTHOOK: query: -- Try to create a database that already exists -CREATE DATABASE test_db +POSTHOOK: query: CREATE DATABASE test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: CREATE DATABASE test_db diff --git a/ql/src/test/results/clientnegative/database_create_invalid_name.q.out b/ql/src/test/results/clientnegative/database_create_invalid_name.q.out index 207cb07..7a765e1 100644 --- a/ql/src/test/results/clientnegative/database_create_invalid_name.q.out +++ b/ql/src/test/results/clientnegative/database_create_invalid_name.q.out @@ -3,8 +3,7 @@ PREHOOK: type: 
SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- Try to create a database with an invalid name -CREATE DATABASE `test.db` +PREHOOK: query: CREATE DATABASE `test.db` PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test.db FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. InvalidObjectException(message:test.db is not a valid database name) diff --git a/ql/src/test/results/clientnegative/database_drop_not_empty.q.out b/ql/src/test/results/clientnegative/database_drop_not_empty.q.out index a750d02..5758d67 100644 --- a/ql/src/test/results/clientnegative/database_drop_not_empty.q.out +++ b/ql/src/test/results/clientnegative/database_drop_not_empty.q.out @@ -3,12 +3,10 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- Try to drop a non-empty database -CREATE DATABASE test_db +PREHOOK: query: CREATE DATABASE test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -POSTHOOK: query: -- Try to drop a non-empty database -CREATE DATABASE test_db +POSTHOOK: query: CREATE DATABASE test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: USE test_db diff --git a/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out b/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out index 167c862..5034977 100644 --- a/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out +++ b/ql/src/test/results/clientnegative/database_drop_not_empty_restrict.q.out @@ -3,12 +3,10 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- Try to drop a non-empty database in restrict mode -CREATE DATABASE db_drop_non_empty_restrict +PREHOOK: query: CREATE DATABASE db_drop_non_empty_restrict PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:db_drop_non_empty_restrict 
-POSTHOOK: query: -- Try to drop a non-empty database in restrict mode -CREATE DATABASE db_drop_non_empty_restrict +POSTHOOK: query: CREATE DATABASE db_drop_non_empty_restrict POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:db_drop_non_empty_restrict PREHOOK: query: USE db_drop_non_empty_restrict diff --git a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out index 69b2b41..8fe4c05 100644 --- a/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out +++ b/ql/src/test/results/clientnegative/disallow_incompatible_type_change_on1.q.out @@ -100,11 +100,7 @@ POSTHOOK: query: ALTER TABLE test_table123 CHANGE COLUMN a a_new BOOLEAN POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@test_table123 POSTHOOK: Output: default@test_table123 -PREHOOK: query: -- All the above ALTERs will succeed since they are between compatible types. --- The following ALTER will fail as MAP and STRING are not --- compatible. 
- -ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING) +PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@test_table123 PREHOOK: Output: default@test_table123 diff --git a/ql/src/test/results/clientnegative/drop_database_cascade.q.out b/ql/src/test/results/clientnegative/drop_database_cascade.q.out index 170a017..463dc7f 100644 --- a/ql/src/test/results/clientnegative/drop_database_cascade.q.out +++ b/ql/src/test/results/clientnegative/drop_database_cascade.q.out @@ -1,13 +1,7 @@ -PREHOOK: query: -- This test verifies that if the functions and tables unregistered when the database is dropped --- and other databases are not affected - -CREATE DATABASE TEST_database +PREHOOK: query: CREATE DATABASE TEST_database PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:TEST_database -POSTHOOK: query: -- This test verifies that if the functions and tables unregistered when the database is dropped --- and other databases are not affected - -CREATE DATABASE TEST_database +POSTHOOK: query: CREATE DATABASE TEST_database POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:TEST_database PREHOOK: query: USE TEST_database diff --git a/ql/src/test/results/clientnegative/drop_table_failure2.q.out b/ql/src/test/results/clientnegative/drop_table_failure2.q.out index 956ed8b..bcb4bfe 100644 --- a/ql/src/test/results/clientnegative/drop_table_failure2.q.out +++ b/ql/src/test/results/clientnegative/drop_table_failure2.q.out @@ -8,8 +8,7 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx6 -PREHOOK: query: -- Can't use DROP TABLE on a view -DROP TABLE xxx6 +PREHOOK: query: DROP TABLE xxx6 PREHOOK: type: DROPTABLE PREHOOK: Input: default@xxx6 PREHOOK: Output: default@xxx6 diff --git a/ql/src/test/results/clientnegative/drop_view_failure1.q.out b/ql/src/test/results/clientnegative/drop_view_failure1.q.out index 
f954ac7..a1a4498 100644 --- a/ql/src/test/results/clientnegative/drop_view_failure1.q.out +++ b/ql/src/test/results/clientnegative/drop_view_failure1.q.out @@ -6,8 +6,7 @@ POSTHOOK: query: CREATE TABLE xxx1(key int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx1 -PREHOOK: query: -- Can't use DROP VIEW on a base table -DROP VIEW xxx1 +PREHOOK: query: DROP VIEW xxx1 PREHOOK: type: DROPVIEW PREHOOK: Input: default@xxx1 PREHOOK: Output: default@xxx1 diff --git a/ql/src/test/results/clientnegative/dyn_part_max.q.out b/ql/src/test/results/clientnegative/dyn_part_max.q.out index 4386720..736bfac 100644 --- a/ql/src/test/results/clientnegative/dyn_part_max.q.out +++ b/ql/src/test/results/clientnegative/dyn_part_max.q.out @@ -4,17 +4,11 @@ PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Test of hive.exec.max.dynamic.partitions --- Set hive.exec.max.dynamic.partitions.pernode to a large value so it will be ignored - -CREATE TABLE max_parts(key STRING) PARTITIONED BY (value STRING) +PREHOOK: query: CREATE TABLE max_parts(key STRING) PARTITIONED BY (value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@max_parts -POSTHOOK: query: -- Test of hive.exec.max.dynamic.partitions --- Set hive.exec.max.dynamic.partitions.pernode to a large value so it will be ignored - -CREATE TABLE max_parts(key STRING) PARTITIONED BY (value STRING) +POSTHOOK: query: CREATE TABLE max_parts(key STRING) PARTITIONED BY (value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@max_parts diff --git a/ql/src/test/results/clientnegative/dyn_part_max_per_node.q.out b/ql/src/test/results/clientnegative/dyn_part_max_per_node.q.out index b8c21ed..44d266c 100644 --- a/ql/src/test/results/clientnegative/dyn_part_max_per_node.q.out +++ 
b/ql/src/test/results/clientnegative/dyn_part_max_per_node.q.out @@ -4,15 +4,11 @@ PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Test of hive.exec.max.dynamic.partitions.pernode - -CREATE TABLE max_parts(key STRING) PARTITIONED BY (value STRING) +PREHOOK: query: CREATE TABLE max_parts(key STRING) PARTITIONED BY (value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@max_parts -POSTHOOK: query: -- Test of hive.exec.max.dynamic.partitions.pernode - -CREATE TABLE max_parts(key STRING) PARTITIONED BY (value STRING) +POSTHOOK: query: CREATE TABLE max_parts(key STRING) PARTITIONED BY (value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@max_parts diff --git a/ql/src/test/results/clientnegative/dynamic_partitions_with_whitelist.q.out b/ql/src/test/results/clientnegative/dynamic_partitions_with_whitelist.q.out index 654d892..cb5ee9b 100644 --- a/ql/src/test/results/clientnegative/dynamic_partitions_with_whitelist.q.out +++ b/ql/src/test/results/clientnegative/dynamic_partitions_with_whitelist.q.out @@ -23,11 +23,7 @@ POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@source_table POSTHOOK: Output: default@source_table@ds=2008-04-08/hr=11 -PREHOOK: query: -- Tests creating dynamic partitions with characters not in the whitelist (i.e. 
9) --- If the directory is not empty the hook will throw an error, instead the error should come from the metastore --- This shows that no dynamic partitions were created and left behind or had directories created - -insert overwrite table dest_table partition (ds, hr) select key, hr, ds, value from source_table where ds='2008-04-08' and value='val_129' order by value asc +PREHOOK: query: insert overwrite table dest_table partition (ds, hr) select key, hr, ds, value from source_table where ds='2008-04-08' and value='val_129' order by value asc PREHOOK: type: QUERY PREHOOK: Input: default@source_table PREHOOK: Input: default@source_table@ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientnegative/illegal_partition_type.q.out b/ql/src/test/results/clientnegative/illegal_partition_type.q.out index fce549b..5a9ec4e 100644 --- a/ql/src/test/results/clientnegative/illegal_partition_type.q.out +++ b/ql/src/test/results/clientnegative/illegal_partition_type.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- begin part(string, int) pass(string, string) -CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' +PREHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tab1 -POSTHOOK: query: -- begin part(string, int) pass(string, string) -CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' +POSTHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab1 diff --git a/ql/src/test/results/clientnegative/insert_into_with_schema.q.out b/ql/src/test/results/clientnegative/insert_into_with_schema.q.out index 
d27803b..660b39c 100644 --- a/ql/src/test/results/clientnegative/insert_into_with_schema.q.out +++ b/ql/src/test/results/clientnegative/insert_into_with_schema.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +PREHOOK: query: drop database if exists x314n cascade PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +POSTHOOK: query: drop database if exists x314n cascade POSTHOOK: type: DROPDATABASE PREHOOK: query: create database x314n PREHOOK: type: CREATEDATABASE diff --git a/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out b/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out index 11e583d..b1cb29b 100644 --- a/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out +++ b/ql/src/test/results/clientnegative/insert_into_with_schema1.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +PREHOOK: query: drop database if exists x314n cascade PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +POSTHOOK: query: drop database if exists x314n cascade POSTHOOK: type: DROPDATABASE PREHOOK: query: create database x314n PREHOOK: type: CREATEDATABASE diff --git a/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out b/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out index 1a8260e..ed55b89 100644 --- a/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out +++ b/ql/src/test/results/clientnegative/insert_into_with_schema2.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +PREHOOK: query: drop database if exists x314n cascade PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +POSTHOOK: query: drop database if exists x314n cascade POSTHOOK: type: DROPDATABASE PREHOOK: 
query: create database x314n PREHOOK: type: CREATEDATABASE diff --git a/ql/src/test/results/clientnegative/insert_into_with_schema3.q.out b/ql/src/test/results/clientnegative/insert_into_with_schema3.q.out index 54bbb46..effc6ea 100644 --- a/ql/src/test/results/clientnegative/insert_into_with_schema3.q.out +++ b/ql/src/test/results/clientnegative/insert_into_with_schema3.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +PREHOOK: query: drop database if exists x314n cascade PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +POSTHOOK: query: drop database if exists x314n cascade POSTHOOK: type: DROPDATABASE PREHOOK: query: create database x314n PREHOOK: type: CREATEDATABASE diff --git a/ql/src/test/results/clientnegative/insert_into_with_schema4.q.out b/ql/src/test/results/clientnegative/insert_into_with_schema4.q.out index 9abdf40..deebe8e 100644 --- a/ql/src/test/results/clientnegative/insert_into_with_schema4.q.out +++ b/ql/src/test/results/clientnegative/insert_into_with_schema4.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +PREHOOK: query: drop database if exists x314n cascade PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- set of tests HIVE-9481 -drop database if exists x314n cascade +POSTHOOK: query: drop database if exists x314n cascade POSTHOOK: type: DROPDATABASE PREHOOK: query: create database x314n PREHOOK: type: CREATEDATABASE diff --git a/ql/src/test/results/clientnegative/invalid_select_column.q.out b/ql/src/test/results/clientnegative/invalid_select_column.q.out index 88bcde6..fa65032 100644 --- a/ql/src/test/results/clientnegative/invalid_select_column.q.out +++ b/ql/src/test/results/clientnegative/invalid_select_column.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Create table -create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, 
month string) stored as textfile +PREHOOK: query: create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_invalid_column -POSTHOOK: query: -- Create table -create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile +POSTHOOK: query: create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_invalid_column diff --git a/ql/src/test/results/clientnegative/invalid_select_column_with_subquery.q.out b/ql/src/test/results/clientnegative/invalid_select_column_with_subquery.q.out index 14e18c0..157d6f8 100644 --- a/ql/src/test/results/clientnegative/invalid_select_column_with_subquery.q.out +++ b/ql/src/test/results/clientnegative/invalid_select_column_with_subquery.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Create table -create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile +PREHOOK: query: create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_invalid_column -POSTHOOK: query: -- Create table -create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile +POSTHOOK: query: create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_invalid_column diff --git 
a/ql/src/test/results/clientnegative/invalid_select_column_with_tablename.q.out b/ql/src/test/results/clientnegative/invalid_select_column_with_tablename.q.out index 5f8bd12..ddee648 100644 --- a/ql/src/test/results/clientnegative/invalid_select_column_with_tablename.q.out +++ b/ql/src/test/results/clientnegative/invalid_select_column_with_tablename.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Create table -create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile +PREHOOK: query: create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_invalid_column -POSTHOOK: query: -- Create table -create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile +POSTHOOK: query: create table if not exists test_invalid_column(key string, value string ) partitioned by (year string, month string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_invalid_column diff --git a/ql/src/test/results/clientnegative/invalidate_view1.q.out b/ql/src/test/results/clientnegative/invalidate_view1.q.out index 7eff2b2..2da46bb 100644 --- a/ql/src/test/results/clientnegative/invalidate_view1.q.out +++ b/ql/src/test/results/clientnegative/invalidate_view1.q.out @@ -6,17 +6,11 @@ PREHOOK: query: DROP VIEW xxx9 PREHOOK: type: DROPVIEW POSTHOOK: query: DROP VIEW xxx9 POSTHOOK: type: DROPVIEW -PREHOOK: query: -- create two levels of view reference, then invalidate intermediate view --- by dropping a column from underlying table, and verify that --- querying outermost view results in full error context -CREATE TABLE xxx10 (key int, value int) +PREHOOK: query: CREATE TABLE xxx10 (key int, value int) PREHOOK: type: CREATETABLE 
PREHOOK: Output: database:default PREHOOK: Output: default@xxx10 -POSTHOOK: query: -- create two levels of view reference, then invalidate intermediate view --- by dropping a column from underlying table, and verify that --- querying outermost view results in full error context -CREATE TABLE xxx10 (key int, value int) +POSTHOOK: query: CREATE TABLE xxx10 (key int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@xxx10 diff --git a/ql/src/test/results/clientnegative/load_stored_as_dirs.q.out b/ql/src/test/results/clientnegative/load_stored_as_dirs.q.out index 22d4ee8..bcc50e5 100644 --- a/ql/src/test/results/clientnegative/load_stored_as_dirs.q.out +++ b/ql/src/test/results/clientnegative/load_stored_as_dirs.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Load data can't work with table with stored as directories -CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) +PREHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: -- Load data can't work with table with stored as directories -CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) +POSTHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out b/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out index 8ec0058..25b590c 100644 --- a/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out +++ 
b/ql/src/test/results/clientnegative/load_wrong_fileformat.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- test for loading into tables with the correct file format --- test for loading into partitions with the correct file format - - -CREATE TABLE load_wrong_fileformat_T1(name STRING) STORED AS SEQUENCEFILE +PREHOOK: query: CREATE TABLE load_wrong_fileformat_T1(name STRING) STORED AS SEQUENCEFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@load_wrong_fileformat_T1 -POSTHOOK: query: -- test for loading into tables with the correct file format --- test for loading into partitions with the correct file format - - -CREATE TABLE load_wrong_fileformat_T1(name STRING) STORED AS SEQUENCEFILE +POSTHOOK: query: CREATE TABLE load_wrong_fileformat_T1(name STRING) STORED AS SEQUENCEFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@load_wrong_fileformat_T1 diff --git a/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out b/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out index 916eca4..1aec723 100644 --- a/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out +++ b/ql/src/test/results/clientnegative/load_wrong_fileformat_rc_seq.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- test for loading into tables with the correct file format --- test for loading into partitions with the correct file format - - -CREATE TABLE T1(name STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE T1(name STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- test for loading into tables with the correct file format --- test for loading into partitions with the correct file format - - -CREATE TABLE T1(name STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE T1(name STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff 
--git a/ql/src/test/results/clientnegative/load_wrong_fileformat_txt_seq.q.out b/ql/src/test/results/clientnegative/load_wrong_fileformat_txt_seq.q.out index 645ece6..7cf6820 100644 --- a/ql/src/test/results/clientnegative/load_wrong_fileformat_txt_seq.q.out +++ b/ql/src/test/results/clientnegative/load_wrong_fileformat_txt_seq.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- test for loading into tables with the correct file format --- test for loading into partitions with the correct file format - - -CREATE TABLE T1(name STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- test for loading into tables with the correct file format --- test for loading into partitions with the correct file format - - -CREATE TABLE T1(name STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientnegative/orc_replace_columns2.q.out b/ql/src/test/results/clientnegative/orc_replace_columns2.q.out index 83b55f9..2316bbb 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns2.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, string to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key tinyint, val string) stored as orc +PREHOOK: query: create table src_orc (key tinyint, val string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, string to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. 
-create table src_orc (key tinyint, val string) stored as orc +POSTHOOK: query: create table src_orc (key tinyint, val string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out b/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out index 2ef833d..e01b7b9 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns2_acid.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, string to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, string to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. 
-create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: create table src_orc (key tinyint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_replace_columns3.q.out b/ql/src/test/results/clientnegative/orc_replace_columns3.q.out index 54408a0..a7b3b72 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns3.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, smallint to tinyint conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key smallint, val string) stored as orc +PREHOOK: query: create table src_orc (key smallint, val string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, smallint to tinyint conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. 
-create table src_orc (key smallint, val string) stored as orc +POSTHOOK: query: create table src_orc (key smallint, val string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out b/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out index 3aefca0..b82ad57 100644 --- a/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_replace_columns3_acid.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, smallint to tinyint conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key smallint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table src_orc (key smallint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, smallint to tinyint conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. 
-create table src_orc (key smallint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: create table src_orc (key smallint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_type_promotion1.q.out b/ql/src/test/results/clientnegative/orc_type_promotion1.q.out index 080cfd0..f452836 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion1.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, string to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key string, val string) stored as orc +PREHOOK: query: create table src_orc (key string, val string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, string to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key string, val string) stored as orc +POSTHOOK: query: create table src_orc (key string, val string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out index f3b1ae8..49800a9 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion1_acid.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, string to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. 
-create table src_orc (key string, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table src_orc (key string, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, string to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key string, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: create table src_orc (key string, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_type_promotion2.q.out b/ql/src/test/results/clientnegative/orc_type_promotion2.q.out index 4205901..740ee1e 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion2.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, bigint to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key smallint, val string) stored as orc +PREHOOK: query: create table src_orc (key smallint, val string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, bigint to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. 
-create table src_orc (key smallint, val string) stored as orc +POSTHOOK: query: create table src_orc (key smallint, val string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out index 9129782..28c789a 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion2_acid.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, bigint to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key smallint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table src_orc (key smallint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, bigint to int conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. 
-create table src_orc (key smallint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: create table src_orc (key smallint, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_type_promotion3.q.out b/ql/src/test/results/clientnegative/orc_type_promotion3.q.out index 1872803..4f97e31 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion3.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, double to smallint conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key double, val string) stored as orc +PREHOOK: query: create table src_orc (key double, val string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, double to smallint conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key double, val string) stored as orc +POSTHOOK: query: create table src_orc (key double, val string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out b/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out index bd33c6c..a214985 100644 --- a/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out +++ b/ql/src/test/results/clientnegative/orc_type_promotion3_acid.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Currently, double to smallint conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. 
-create table src_orc (key double, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table src_orc (key double, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_orc -POSTHOOK: query: -- Currently, double to smallint conversion is not supported because it isn't in the lossless --- TypeIntoUtils.implicitConvertible conversions. -create table src_orc (key double, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: create table src_orc (key double, val string) clustered by (val) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc diff --git a/ql/src/test/results/clientnegative/recursive_view.q.out b/ql/src/test/results/clientnegative/recursive_view.q.out index f51453c..ca785f1 100644 --- a/ql/src/test/results/clientnegative/recursive_view.q.out +++ b/ql/src/test/results/clientnegative/recursive_view.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Can't have recursive views - -drop table t +PREHOOK: query: drop table t PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Can't have recursive views - -drop table t +POSTHOOK: query: drop table t POSTHOOK: type: DROPTABLE PREHOOK: query: drop view r0 PREHOOK: type: DROPVIEW diff --git a/ql/src/test/results/clientnegative/script_broken_pipe2.q.out b/ql/src/test/results/clientnegative/script_broken_pipe2.q.out index 7e186a0..2202bf0 100644 --- a/ql/src/test/results/clientnegative/script_broken_pipe2.q.out +++ b/ql/src/test/results/clientnegative/script_broken_pipe2.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Tests exception in ScriptOperator.processOp() by passing extra data needed to fill pipe buffer -SELECT TRANSFORM(key, value, key, value, key, value, 
key, value, key, value, key, value, key, value, key, value, key, value, key, value, key, value, key, value) USING 'true' as a,b,c,d FROM src +PREHOOK: query: SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value, key, value, key, value, key, value, key, value, key, value, key, value) USING 'true' as a,b,c,d FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/script_broken_pipe3.q.out b/ql/src/test/results/clientnegative/script_broken_pipe3.q.out index 575b8f2..21415a6 100644 --- a/ql/src/test/results/clientnegative/script_broken_pipe3.q.out +++ b/ql/src/test/results/clientnegative/script_broken_pipe3.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Test to ensure that a script with a bad error code still fails even with partial consumption -SELECT TRANSFORM(*) USING 'false' AS a, b FROM (SELECT TRANSFORM(*) USING 'echo' AS a, b FROM src LIMIT 1) tmp +PREHOOK: query: SELECT TRANSFORM(*) USING 'false' AS a, b FROM (SELECT TRANSFORM(*) USING 'echo' AS a, b FROM src LIMIT 1) tmp PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/selectDistinctStarNeg_1.q.out b/ql/src/test/results/clientnegative/selectDistinctStarNeg_1.q.out index 52948c8..2d97755 100644 --- a/ql/src/test/results/clientnegative/selectDistinctStarNeg_1.q.out +++ b/ql/src/test/results/clientnegative/selectDistinctStarNeg_1.q.out @@ -1,9 +1,5 @@ -PREHOOK: query: -- Duplicate column name: key - -drop view if exists v +PREHOOK: query: drop view if exists v PREHOOK: type: DROPVIEW -POSTHOOK: query: -- Duplicate column name: key - -drop view if exists v +POSTHOOK: query: drop view if exists v POSTHOOK: type: DROPVIEW FAILED: SemanticException [Error 10036]: Duplicate column name: key diff --git a/ql/src/test/results/clientnegative/serde_regex.q.out b/ql/src/test/results/clientnegative/serde_regex.q.out index 
7892bb2..a1ec5ca 100644 --- a/ql/src/test/results/clientnegative/serde_regex.q.out +++ b/ql/src/test/results/clientnegative/serde_regex.q.out @@ -4,8 +4,7 @@ PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- This should fail because Regex SerDe doesn't support STRUCT -CREATE TABLE serde_regex( +PREHOOK: query: CREATE TABLE serde_regex( host STRING, identity STRING, `user` STRING, diff --git a/ql/src/test/results/clientnegative/serde_regex2.q.out b/ql/src/test/results/clientnegative/serde_regex2.q.out index 1ceb387..374675d 100644 --- a/ql/src/test/results/clientnegative/serde_regex2.q.out +++ b/ql/src/test/results/clientnegative/serde_regex2.q.out @@ -4,8 +4,7 @@ PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Mismatch between the number of matching groups and columns, throw run time exception. Ideally this should throw a compile time exception. See JIRA-3023 for more details. - CREATE TABLE serde_regex( +PREHOOK: query: CREATE TABLE serde_regex( host STRING, identity STRING, `user` STRING, @@ -23,8 +22,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@serde_regex -POSTHOOK: query: -- Mismatch between the number of matching groups and columns, throw run time exception. Ideally this should throw a compile time exception. See JIRA-3023 for more details. 
- CREATE TABLE serde_regex( +POSTHOOK: query: CREATE TABLE serde_regex( host STRING, identity STRING, `user` STRING, @@ -58,8 +56,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/apache.access.2.log" I POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@serde_regex -PREHOOK: query: -- raise an exception -SELECT * FROM serde_regex ORDER BY time +PREHOOK: query: SELECT * FROM serde_regex ORDER BY time PREHOOK: type: QUERY PREHOOK: Input: default@serde_regex #### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/serde_regex3.q.out b/ql/src/test/results/clientnegative/serde_regex3.q.out index 028a24f..dc0a0e2 100644 --- a/ql/src/test/results/clientnegative/serde_regex3.q.out +++ b/ql/src/test/results/clientnegative/serde_regex3.q.out @@ -4,8 +4,7 @@ PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- null input.regex, raise an exception - CREATE TABLE serde_regex( +PREHOOK: query: CREATE TABLE serde_regex( host STRING, identity STRING, `user` STRING, diff --git a/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out index 61dafb4..5d018c4 100644 --- a/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out +++ b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable0.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- should fail: for some internal variables which should not be settable via set command -desc src +PREHOOK: query: desc src PREHOOK: type: DESCTABLE PREHOOK: Input: default@src -POSTHOOK: query: -- should fail: for some internal variables which should not be settable via set command -desc src +POSTHOOK: query: desc src POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src key string default diff --git a/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out 
b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out index ae2dafb..07cfe23 100644 --- a/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out +++ b/ql/src/test/results/clientnegative/set_hiveconf_internal_variable1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- should fail: hive.conf.internal.variable.list is in restricted list -desc src +PREHOOK: query: desc src PREHOOK: type: DESCTABLE PREHOOK: Input: default@src -POSTHOOK: query: -- should fail: hive.conf.internal.variable.list is in restricted list -desc src +POSTHOOK: query: desc src POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src key string default diff --git a/ql/src/test/results/clientnegative/set_hiveconf_validation0.q.out b/ql/src/test/results/clientnegative/set_hiveconf_validation0.q.out index b792a67..b0fc558 100644 --- a/ql/src/test/results/clientnegative/set_hiveconf_validation0.q.out +++ b/ql/src/test/results/clientnegative/set_hiveconf_validation0.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- should fail: hive.join.cache.size accepts int type -desc src +PREHOOK: query: desc src PREHOOK: type: DESCTABLE PREHOOK: Input: default@src -POSTHOOK: query: -- should fail: hive.join.cache.size accepts int type -desc src +POSTHOOK: query: desc src POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src key string default diff --git a/ql/src/test/results/clientnegative/set_hiveconf_validation1.q.out b/ql/src/test/results/clientnegative/set_hiveconf_validation1.q.out index b55ad50..ed4b03d 100644 --- a/ql/src/test/results/clientnegative/set_hiveconf_validation1.q.out +++ b/ql/src/test/results/clientnegative/set_hiveconf_validation1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- should fail: hive.map.aggr.hash.min.reduction accepts float type -desc src +PREHOOK: query: desc src PREHOOK: type: DESCTABLE PREHOOK: Input: default@src -POSTHOOK: query: -- should fail: hive.map.aggr.hash.min.reduction accepts float type -desc src +POSTHOOK: query: desc src POSTHOOK: type: DESCTABLE 
POSTHOOK: Input: default@src key string default diff --git a/ql/src/test/results/clientnegative/set_hiveconf_validation2.q.out b/ql/src/test/results/clientnegative/set_hiveconf_validation2.q.out index af004a1..873c149 100644 --- a/ql/src/test/results/clientnegative/set_hiveconf_validation2.q.out +++ b/ql/src/test/results/clientnegative/set_hiveconf_validation2.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- should fail: hive.fetch.task.conversion accepts none, minimal or more -desc src +PREHOOK: query: desc src PREHOOK: type: DESCTABLE PREHOOK: Input: default@src -POSTHOOK: query: -- should fail: hive.fetch.task.conversion accepts none, minimal or more -desc src +POSTHOOK: query: desc src POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src key string default diff --git a/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out b/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out index 8d3bf44..5345d7c 100644 --- a/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out +++ b/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out @@ -38,24 +38,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@table_desc POSTHOOK: Lineage: table_desc.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: table_desc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- If the user asked for sort merge join to be enforced (by setting --- hive.enforce.sortmergebucketmapjoin to true), an error should be thrown, since --- one of the tables is in ascending order and the other is in descending order, --- and sort merge bucket mapjoin cannot be performed. In the default mode, the --- query would succeed, although a regular map-join would be performed instead of --- what the user asked. 
- -explain +PREHOOK: query: explain select /*+mapjoin(a)*/ * from table_asc a join table_desc b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- If the user asked for sort merge join to be enforced (by setting --- hive.enforce.sortmergebucketmapjoin to true), an error should be thrown, since --- one of the tables is in ascending order and the other is in descending order, --- and sort merge bucket mapjoin cannot be performed. In the default mode, the --- query would succeed, although a regular map-join would be performed instead of --- what the user asked. - -explain +POSTHOOK: query: explain select /*+mapjoin(a)*/ * from table_asc a join table_desc b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out b/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out index cb8e51c..d7b9965 100644 --- a/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out +++ b/ql/src/test/results/clientnegative/special_character_in_tabnames_1.q.out @@ -1,9 +1,4 @@ -PREHOOK: query: -- If hive.support.special.characters.tablename=false, we can not use special characters in table names. --- The same query would work when it is set to true(default value). 
--- Note that there is a positive test with the same name in clientpositive - - -create table `c/b/o_t1`(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE +PREHOOK: query: create table `c/b/o_t1`(key string, value string, c_int int, c_float float, c_boolean boolean) partitioned by (dt string) row format delimited fields terminated by ',' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@c/b/o_t1 diff --git a/ql/src/test/results/clientnegative/stats_aggregator_error_1.q.out b/ql/src/test/results/clientnegative/stats_aggregator_error_1.q.out index 47ba34b..5261e03 100644 --- a/ql/src/test/results/clientnegative/stats_aggregator_error_1.q.out +++ b/ql/src/test/results/clientnegative/stats_aggregator_error_1.q.out @@ -1,20 +1,8 @@ -PREHOOK: query: -- In this test, there is a dummy stats aggregator which throws an error when the --- method connect is called (as indicated by the parameter hive.test.dummystats.aggregator) --- If stats need not be reliable, the statement succeeds. However, if stats are supposed --- to be reliable (by setting hive.stats.reliable to true), the insert statement fails --- because stats cannot be collected for this statement - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- In this test, there is a dummy stats aggregator which throws an error when the --- method connect is called (as indicated by the parameter hive.test.dummystats.aggregator) --- If stats need not be reliable, the statement succeeds. 
However, if stats are supposed --- to be reliable (by setting hive.stats.reliable to true), the insert statement fails --- because stats cannot be collected for this statement - -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out b/ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out index ff3e073..70d2ee4 100644 --- a/ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out +++ b/ql/src/test/results/clientnegative/stats_aggregator_error_2.q.out @@ -1,18 +1,8 @@ -PREHOOK: query: -- In this test, the stats aggregator does not exists. --- If stats need not be reliable, the statement succeeds. However, if stats are supposed --- to be reliable (by setting hive.stats.reliable to true), the insert statement fails --- because stats cannot be collected for this statement - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- In this test, the stats aggregator does not exists. --- If stats need not be reliable, the statement succeeds. 
However, if stats are supposed --- to be reliable (by setting hive.stats.reliable to true), the insert statement fails --- because stats cannot be collected for this statement - -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out b/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out index 4faf327..a88e0b8 100644 --- a/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out +++ b/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- test analyze table ... compute statistics partialscan - --- 1. prepare data -CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) +PREHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) partitioned by (ds string, hr string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@analyze_srcpart_partial_scan -POSTHOOK: query: -- test analyze table ... compute statistics partialscan - --- 1. prepare data -CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) +POSTHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) partitioned by (ds string, hr string) stored as rcfile POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientnegative/stats_partscan_norcfile.q.out b/ql/src/test/results/clientnegative/stats_partscan_norcfile.q.out index 7f7763b..38a62b0 100644 --- a/ql/src/test/results/clientnegative/stats_partscan_norcfile.q.out +++ b/ql/src/test/results/clientnegative/stats_partscan_norcfile.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- test analyze table ... 
compute statistics partialscan - -create table analyze_srcpart_partial_scan like srcpart +PREHOOK: query: create table analyze_srcpart_partial_scan like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@analyze_srcpart_partial_scan -POSTHOOK: query: -- test analyze table ... compute statistics partialscan - -create table analyze_srcpart_partial_scan like srcpart +POSTHOOK: query: create table analyze_srcpart_partial_scan like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@analyze_srcpart_partial_scan diff --git a/ql/src/test/results/clientnegative/stats_publisher_error_1.q.out b/ql/src/test/results/clientnegative/stats_publisher_error_1.q.out index f0e4034..91f9090 100644 --- a/ql/src/test/results/clientnegative/stats_publisher_error_1.q.out +++ b/ql/src/test/results/clientnegative/stats_publisher_error_1.q.out @@ -1,20 +1,8 @@ -PREHOOK: query: -- In this test, there is a dummy stats publisher which throws an error when the --- method connect is called (as indicated by the parameter hive.test.dummystats.publisher) --- If stats need not be reliable, the statement succeeds. However, if stats are supposed --- to be reliable (by setting hive.stats.reliable to true), the insert statement fails --- because stats cannot be collected for this statement - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- In this test, there is a dummy stats publisher which throws an error when the --- method connect is called (as indicated by the parameter hive.test.dummystats.publisher) --- If stats need not be reliable, the statement succeeds. 
However, if stats are supposed --- to be reliable (by setting hive.stats.reliable to true), the insert statement fails --- because stats cannot be collected for this statement - -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientnegative/stats_publisher_error_2.q.out b/ql/src/test/results/clientnegative/stats_publisher_error_2.q.out index c10108b..70d2ee4 100644 --- a/ql/src/test/results/clientnegative/stats_publisher_error_2.q.out +++ b/ql/src/test/results/clientnegative/stats_publisher_error_2.q.out @@ -1,18 +1,8 @@ -PREHOOK: query: -- In this test, the stats publisher does not exists. --- If stats need not be reliable, the statement succeeds. However, if stats are supposed --- to be reliable (by setting hive.stats.reliable to true), the insert statement fails --- because stats cannot be collected for this statement - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- In this test, the stats publisher does not exists. --- If stats need not be reliable, the statement succeeds. 
However, if stats are supposed --- to be reliable (by setting hive.stats.reliable to true), the insert statement fails --- because stats cannot be collected for this statement - -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientnegative/truncate_bucketed_column.q.out b/ql/src/test/results/clientnegative/truncate_bucketed_column.q.out index 80981e8..573996b 100644 --- a/ql/src/test/results/clientnegative/truncate_bucketed_column.q.out +++ b/ql/src/test/results/clientnegative/truncate_bucketed_column.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating a bucketed column - -CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a bucketed column - -CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab diff --git a/ql/src/test/results/clientnegative/truncate_column_indexed_table.q.out b/ql/src/test/results/clientnegative/truncate_column_indexed_table.q.out index 2844c7b..451fdba 100644 --- a/ql/src/test/results/clientnegative/truncate_column_indexed_table.q.out +++ b/ql/src/test/results/clientnegative/truncate_column_indexed_table.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating a column from an indexed table - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE 
test_tab (key STRING, value STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a column from an indexed table - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab diff --git a/ql/src/test/results/clientnegative/truncate_column_list_bucketing.q.out b/ql/src/test/results/clientnegative/truncate_column_list_bucketing.q.out index ede7647..3b6ddde 100644 --- a/ql/src/test/results/clientnegative/truncate_column_list_bucketing.q.out +++ b/ql/src/test/results/clientnegative/truncate_column_list_bucketing.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating a column on which a table is list bucketed - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a column on which a table is list bucketed - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab diff --git a/ql/src/test/results/clientnegative/truncate_column_seqfile.q.out b/ql/src/test/results/clientnegative/truncate_column_seqfile.q.out index 2fb494b..7fc8519 100644 --- a/ql/src/test/results/clientnegative/truncate_column_seqfile.q.out +++ b/ql/src/test/results/clientnegative/truncate_column_seqfile.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating a column from a table stored as a sequence file - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS SEQUENCEFILE +PREHOOK: query: 
CREATE TABLE test_tab (key STRING, value STRING) STORED AS SEQUENCEFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a column from a table stored as a sequence file - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS SEQUENCEFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS SEQUENCEFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab diff --git a/ql/src/test/results/clientnegative/truncate_nonexistant_column.q.out b/ql/src/test/results/clientnegative/truncate_nonexistant_column.q.out index 8112fa6..2c731e0 100644 --- a/ql/src/test/results/clientnegative/truncate_nonexistant_column.q.out +++ b/ql/src/test/results/clientnegative/truncate_nonexistant_column.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests attempting to truncate a column in a table that doesn't exist - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests attempting to truncate a column in a table that doesn't exist - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab diff --git a/ql/src/test/results/clientnegative/truncate_partition_column.q.out b/ql/src/test/results/clientnegative/truncate_partition_column.q.out index 32eccb1..a93ece3 100644 --- a/ql/src/test/results/clientnegative/truncate_partition_column.q.out +++ b/ql/src/test/results/clientnegative/truncate_partition_column.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating a partition column - -CREATE TABLE test_tab (key STRING, value STRING) 
PARTITIONED BY (part STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a partition column - -CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab diff --git a/ql/src/test/results/clientnegative/truncate_partition_column2.q.out b/ql/src/test/results/clientnegative/truncate_partition_column2.q.out index baaf610..ff65993 100644 --- a/ql/src/test/results/clientnegative/truncate_partition_column2.q.out +++ b/ql/src/test/results/clientnegative/truncate_partition_column2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating a partition column - -CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a partition column - -CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab diff --git a/ql/src/test/results/clientnegative/udf_sort_array_by_wrong3.q.out b/ql/src/test/results/clientnegative/udf_sort_array_by_wrong3.q.out index 09b3594..2a4e749 100644 --- a/ql/src/test/results/clientnegative/udf_sort_array_by_wrong3.q.out +++ 
b/ql/src/test/results/clientnegative/udf_sort_array_by_wrong3.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- invalid field name in side struct - -DROP TABLE IF EXISTS sort_array_by_order_wrong +PREHOOK: query: DROP TABLE IF EXISTS sort_array_by_order_wrong PREHOOK: type: DROPTABLE -POSTHOOK: query: -- invalid field name in side struct - -DROP TABLE IF EXISTS sort_array_by_order_wrong +POSTHOOK: query: DROP TABLE IF EXISTS sort_array_by_order_wrong POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE sort_array_by_order_wrong STORED AS TEXTFILE diff --git a/ql/src/test/results/clientpositive/acid_join.q.out b/ql/src/test/results/clientpositive/acid_join.q.out index 99e94e4..a5c3f93 100644 --- a/ql/src/test/results/clientpositive/acid_join.q.out +++ b/ql/src/test/results/clientpositive/acid_join.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- This test checks that a join with tables with two different buckets send the right bucket info to each table. -create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") +PREHOOK: query: create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acidjoin1 -POSTHOOK: query: -- This test checks that a join with tables with two different buckets send the right bucket info to each table. 
-create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") +POSTHOOK: query: create table acidjoin1(name varchar(50), age int) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@acidjoin1 diff --git a/ql/src/test/results/clientpositive/acid_table_stats.q.out b/ql/src/test/results/clientpositive/acid_table_stats.q.out index 58d391d..08cd00a 100644 --- a/ql/src/test/results/clientpositive/acid_table_stats.q.out +++ b/ql/src/test/results/clientpositive/acid_table_stats.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- single level partition, sorted dynamic partition enabled -drop table acid +PREHOOK: query: drop table acid PREHOOK: type: DROPTABLE -POSTHOOK: query: -- single level partition, sorted dynamic partition enabled -drop table acid +POSTHOOK: query: drop table acid POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE @@ -94,7 +92,7 @@ Partition Parameters: numFiles 2 numRows 0 rawDataSize 0 - totalSize 3837 + totalSize 3852 #### A masked pattern was here #### # Storage Information @@ -132,9 +130,9 @@ STAGE PLANS: Map Operator Tree: TableScan alias: acid - Statistics: Num rows: 1 Data size: 3837 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 3852 Basic stats: PARTIAL Column stats: NONE Select Operator - Statistics: Num rows: 1 Data size: 3837 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 3852 Basic stats: PARTIAL Column stats: NONE Group By Operator aggregations: count() mode: hash @@ -211,7 +209,7 @@ Partition Parameters: numFiles 2 numRows 1000 rawDataSize 208000 - totalSize 3837 + totalSize 3852 #### A masked pattern was here #### # 
Storage Information @@ -260,7 +258,7 @@ Partition Parameters: numFiles 2 numRows 1000 rawDataSize 208000 - totalSize 3837 + totalSize 3852 #### A masked pattern was here #### # Storage Information @@ -387,7 +385,7 @@ Partition Parameters: numFiles 4 numRows 1000 rawDataSize 208000 - totalSize 7689 + totalSize 7704 #### A masked pattern was here #### # Storage Information @@ -436,7 +434,7 @@ Partition Parameters: numFiles 4 numRows 2000 rawDataSize 416000 - totalSize 7689 + totalSize 7704 #### A masked pattern was here #### # Storage Information diff --git a/ql/src/test/results/clientpositive/add_part_exist.q.out b/ql/src/test/results/clientpositive/add_part_exist.q.out index 5a69944..f8d50ca 100644 --- a/ql/src/test/results/clientpositive/add_part_exist.q.out +++ b/ql/src/test/results/clientpositive/add_part_exist.q.out @@ -99,12 +99,10 @@ src_thrift srcbucket srcbucket2 srcpart -PREHOOK: query: -- Test ALTER TABLE ADD PARTITION in non-default Database -CREATE DATABASE add_part_test_db +PREHOOK: query: CREATE DATABASE add_part_test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:add_part_test_db -POSTHOOK: query: -- Test ALTER TABLE ADD PARTITION in non-default Database -CREATE DATABASE add_part_test_db +POSTHOOK: query: CREATE DATABASE add_part_test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:add_part_test_db PREHOOK: query: CREATE TABLE add_part_test_db.add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) diff --git a/ql/src/test/results/clientpositive/add_part_multiple.q.out b/ql/src/test/results/clientpositive/add_part_multiple.q.out index 0e6ac21..c452223 100644 --- a/ql/src/test/results/clientpositive/add_part_multiple.q.out +++ b/ql/src/test/results/clientpositive/add_part_multiple.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- HIVE-5122 locations for 2nd, 3rd... 
partition are ignored - -CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@add_part_test -POSTHOOK: query: -- HIVE-5122 locations for 2nd, 3rd... partition are ignored - -CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@add_part_test diff --git a/ql/src/test/results/clientpositive/add_partition_no_whitelist.q.out b/ql/src/test/results/clientpositive/add_partition_no_whitelist.q.out index 4c4be4a..9d66dd7 100644 --- a/ql/src/test/results/clientpositive/add_partition_no_whitelist.q.out +++ b/ql/src/test/results/clientpositive/add_partition_no_whitelist.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Test with no partition name whitelist pattern - -CREATE TABLE part_nowhitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE part_nowhitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_nowhitelist_test -POSTHOOK: query: -- Test with no partition name whitelist pattern - -CREATE TABLE part_nowhitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE part_nowhitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@part_nowhitelist_test diff --git a/ql/src/test/results/clientpositive/add_partition_with_whitelist.q.out b/ql/src/test/results/clientpositive/add_partition_with_whitelist.q.out index d996d6d..5274869 100644 --- 
a/ql/src/test/results/clientpositive/add_partition_with_whitelist.q.out +++ b/ql/src/test/results/clientpositive/add_partition_with_whitelist.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- This pattern matches only letters. - -CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_whitelist_test -POSTHOOK: query: -- This pattern matches only letters. - -CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@part_whitelist_test diff --git a/ql/src/test/results/clientpositive/alias_casted_column.q.out b/ql/src/test/results/clientpositive/alias_casted_column.q.out index e653b630..1d996d8 100644 --- a/ql/src/test/results/clientpositive/alias_casted_column.q.out +++ b/ql/src/test/results/clientpositive/alias_casted_column.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- HIVE-2477 Use name of original expression for name of CAST output -explain select key from (select cast(key as int) from src )t +PREHOOK: query: explain select key from (select cast(key as int) from src )t PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-2477 Use name of original expression for name of CAST output -explain select key from (select cast(key as int) from src )t +POSTHOOK: query: explain select key from (select cast(key as int) from src )t POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -33,11 +31,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: --backward -explain select key2 from (select cast(key as int) key2 from src )t +PREHOOK: query: explain select key2 from (select cast(key as int) key2 from src )t PREHOOK: type: QUERY 
-POSTHOOK: query: --backward -explain select key2 from (select cast(key as int) key2 from src )t +POSTHOOK: query: explain select key2 from (select cast(key as int) key2 from src )t POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out index ecd784e..620b61e 100644 --- a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out +++ b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out @@ -57,18 +57,12 @@ POSTHOOK: Input: default@src 17val_17 ["17","val_17"] 0val_0 ["0","val_0"] 57val_57 ["57","val_57"] -PREHOOK: query: -- The order of columns is decided by row schema of prev operator --- Like join which has two or more aliases, it's from left most aias to right aliases. - -explain +PREHOOK: query: explain select stack(2, *) as (e1,e2,e3) from ( select concat(*), concat(a.*), concat(b.*), concat(a.*, b.key), concat(a.key, b.*) from src a join src b on a.key+1=b.key where a.key < 100) x limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- The order of columns is decided by row schema of prev operator --- Like join which has two or more aliases, it's from left most aias to right aliases. 
- -explain +POSTHOOK: query: explain select stack(2, *) as (e1,e2,e3) from ( select concat(*), concat(a.*), concat(b.*), concat(a.*, b.key), concat(a.key, b.*) from src a join src b on a.key+1=b.key where a.key < 100) x limit 10 @@ -172,14 +166,12 @@ POSTHOOK: Input: default@src 8val_89 NULL 9val_9 9val_910val_10 9val_9 10val_10 9val_910 NULL 10val_10 -PREHOOK: query: -- HIVE-4181 TOK_FUNCTIONSTAR for UDTF -create table allcolref as select array(key, value) from src +PREHOOK: query: create table allcolref as select array(key, value) from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@allcolref -POSTHOOK: query: -- HIVE-4181 TOK_FUNCTIONSTAR for UDTF -create table allcolref as select array(key, value) from src +POSTHOOK: query: create table allcolref as select array(key, value) from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/alter1.q.out b/ql/src/test/results/clientpositive/alter1.q.out index 99248c3..c2efbe5 100644 --- a/ql/src/test/results/clientpositive/alter1.q.out +++ b/ql/src/test/results/clientpositive/alter1.q.out @@ -177,13 +177,11 @@ POSTHOOK: Input: default@alter1 a int b int c string -PREHOOK: query: -- Cleanup -DROP TABLE alter1 +PREHOOK: query: DROP TABLE alter1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@alter1 PREHOOK: Output: default@alter1 -POSTHOOK: query: -- Cleanup -DROP TABLE alter1 +POSTHOOK: query: DROP TABLE alter1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@alter1 POSTHOOK: Output: default@alter1 @@ -208,14 +206,10 @@ src_thrift srcbucket srcbucket2 srcpart -PREHOOK: query: -- With non-default Database - -CREATE DATABASE alter1_db +PREHOOK: query: CREATE DATABASE alter1_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:alter1_db -POSTHOOK: query: -- With non-default Database - -CREATE DATABASE alter1_db +POSTHOOK: query: CREATE 
DATABASE alter1_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:alter1_db PREHOOK: query: SHOW TABLES alter1_db diff --git a/ql/src/test/results/clientpositive/alter2.q.out b/ql/src/test/results/clientpositive/alter2.q.out index 083d04f..47ed6d6 100644 --- a/ql/src/test/results/clientpositive/alter2.q.out +++ b/ql/src/test/results/clientpositive/alter2.q.out @@ -196,13 +196,11 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@alter2 insertdate=2008-01-01 insertdate=2008-01-02 -PREHOOK: query: -- Cleanup -DROP TABLE alter2 +PREHOOK: query: DROP TABLE alter2 PREHOOK: type: DROPTABLE PREHOOK: Input: default@alter2 PREHOOK: Output: default@alter2 -POSTHOOK: query: -- Cleanup -DROP TABLE alter2 +POSTHOOK: query: DROP TABLE alter2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@alter2 POSTHOOK: Output: default@alter2 @@ -227,14 +225,10 @@ src_thrift srcbucket srcbucket2 srcpart -PREHOOK: query: -- Using non-default Database - -CREATE DATABASE alter2_db +PREHOOK: query: CREATE DATABASE alter2_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:alter2_db -POSTHOOK: query: -- Using non-default Database - -CREATE DATABASE alter2_db +POSTHOOK: query: CREATE DATABASE alter2_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:alter2_db PREHOOK: query: USE alter2_db diff --git a/ql/src/test/results/clientpositive/alter3.q.out b/ql/src/test/results/clientpositive/alter3.q.out index 385073f..ebae93f 100644 --- a/ql/src/test/results/clientpositive/alter3.q.out +++ b/ql/src/test/results/clientpositive/alter3.q.out @@ -153,13 +153,11 @@ pcol1 string pcol2 string #### A masked pattern was here #### -PREHOOK: query: -- Cleanup -DROP TABLE alter3_src +PREHOOK: query: DROP TABLE alter3_src PREHOOK: type: DROPTABLE PREHOOK: Input: default@alter3_src PREHOOK: Output: default@alter3_src -POSTHOOK: query: -- Cleanup -DROP TABLE alter3_src +POSTHOOK: query: DROP TABLE alter3_src POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@alter3_src POSTHOOK: 
Output: default@alter3_src @@ -200,14 +198,10 @@ src_thrift srcbucket srcbucket2 srcpart -PREHOOK: query: -- With non-default Database - -CREATE DATABASE alter3_db +PREHOOK: query: CREATE DATABASE alter3_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:alter3_db -POSTHOOK: query: -- With non-default Database - -CREATE DATABASE alter3_db +POSTHOOK: query: CREATE DATABASE alter3_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:alter3_db PREHOOK: query: USE alter3_db diff --git a/ql/src/test/results/clientpositive/alter4.q.out b/ql/src/test/results/clientpositive/alter4.q.out index 1d895a2..ddcb0ed 100644 --- a/ql/src/test/results/clientpositive/alter4.q.out +++ b/ql/src/test/results/clientpositive/alter4.q.out @@ -34,13 +34,11 @@ key int value string #### A masked pattern was here #### -PREHOOK: query: -- Cleanup -DROP TABLE set_bucketing_test +PREHOOK: query: DROP TABLE set_bucketing_test PREHOOK: type: DROPTABLE PREHOOK: Input: default@set_bucketing_test PREHOOK: Output: default@set_bucketing_test -POSTHOOK: query: -- Cleanup -DROP TABLE set_bucketing_test +POSTHOOK: query: DROP TABLE set_bucketing_test POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@set_bucketing_test POSTHOOK: Output: default@set_bucketing_test @@ -65,14 +63,10 @@ src_thrift srcbucket srcbucket2 srcpart -PREHOOK: query: -- with non-default Database - -CREATE DATABASE alter4_db +PREHOOK: query: CREATE DATABASE alter4_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:alter4_db -POSTHOOK: query: -- with non-default Database - -CREATE DATABASE alter4_db +POSTHOOK: query: CREATE DATABASE alter4_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:alter4_db PREHOOK: query: USE alter4_db diff --git a/ql/src/test/results/clientpositive/alter5.q.out b/ql/src/test/results/clientpositive/alter5.q.out index 1e9573e..4c83dc0 100644 --- a/ql/src/test/results/clientpositive/alter5.q.out +++ b/ql/src/test/results/clientpositive/alter5.q.out @@ -1,16 +1,8 @@ -PREHOOK: 
query: -- --- Added to validate the fix for HIVE-2117 - explicit partition location --- - -create table alter5_src ( col1 string ) stored as textfile +PREHOOK: query: create table alter5_src ( col1 string ) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alter5_src -POSTHOOK: query: -- --- Added to validate the fix for HIVE-2117 - explicit partition location --- - -create table alter5_src ( col1 string ) stored as textfile +POSTHOOK: query: create table alter5_src ( col1 string ) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@alter5_src @@ -30,19 +22,11 @@ POSTHOOK: query: create table alter5 ( col1 string ) partitioned by (dt string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@alter5 -PREHOOK: query: -- --- Here's the interesting bit for HIVE-2117 - partition subdir should be --- named "parta". --- -alter table alter5 add partition (dt='a') location 'parta' +PREHOOK: query: alter table alter5 add partition (dt='a') location 'parta' PREHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### PREHOOK: Output: default@alter5 -POSTHOOK: query: -- --- Here's the interesting bit for HIVE-2117 - partition subdir should be --- named "parta". 
--- -alter table alter5 add partition (dt='a') location 'parta' +POSTHOOK: query: alter table alter5 add partition (dt='a') location 'parta' POSTHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### POSTHOOK: Output: default@alter5 @@ -102,13 +86,11 @@ dt string dt string #### A masked pattern was here #### -PREHOOK: query: -- Cleanup -DROP TABLE alter5_src +PREHOOK: query: DROP TABLE alter5_src PREHOOK: type: DROPTABLE PREHOOK: Input: default@alter5_src PREHOOK: Output: default@alter5_src -POSTHOOK: query: -- Cleanup -DROP TABLE alter5_src +POSTHOOK: query: DROP TABLE alter5_src POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@alter5_src POSTHOOK: Output: default@alter5_src @@ -141,14 +123,10 @@ src_thrift srcbucket srcbucket2 srcpart -PREHOOK: query: -- With non-default Database - -CREATE DATABASE alter5_db +PREHOOK: query: CREATE DATABASE alter5_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:alter5_db -POSTHOOK: query: -- With non-default Database - -CREATE DATABASE alter5_db +POSTHOOK: query: CREATE DATABASE alter5_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:alter5_db PREHOOK: query: USE alter5_db diff --git a/ql/src/test/results/clientpositive/alter_char1.q.out b/ql/src/test/results/clientpositive/alter_char1.q.out index b1a88df..ff783e8 100644 --- a/ql/src/test/results/clientpositive/alter_char1.q.out +++ b/ql/src/test/results/clientpositive/alter_char1.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create database ac +PREHOOK: query: create database ac PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:ac -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create database ac +POSTHOOK: query: create database ac POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:ac PREHOOK: query: create table ac.alter_char_1 (key string, value string) @@ -41,23 +37,19 @@ POSTHOOK: Input: ac@alter_char_1 0 val_0 10 val_10 100 val_100 -PREHOOK: query: -- change column to char -alter table ac.alter_char_1 change 
column value value char(20) +PREHOOK: query: alter table ac.alter_char_1 change column value value char(20) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: ac@alter_char_1 PREHOOK: Output: ac@alter_char_1 -POSTHOOK: query: -- change column to char -alter table ac.alter_char_1 change column value value char(20) +POSTHOOK: query: alter table ac.alter_char_1 change column value value char(20) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: ac@alter_char_1 POSTHOOK: Output: ac@alter_char_1 -PREHOOK: query: -- contents should still look the same -select * from ac.alter_char_1 +PREHOOK: query: select * from ac.alter_char_1 PREHOOK: type: QUERY PREHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### -POSTHOOK: query: -- contents should still look the same -select * from ac.alter_char_1 +POSTHOOK: query: select * from ac.alter_char_1 POSTHOOK: type: QUERY POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### @@ -66,23 +58,19 @@ POSTHOOK: Input: ac@alter_char_1 0 val_0 10 val_10 100 val_100 -PREHOOK: query: -- change column to smaller char -alter table ac.alter_char_1 change column value value char(3) +PREHOOK: query: alter table ac.alter_char_1 change column value value char(3) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: ac@alter_char_1 PREHOOK: Output: ac@alter_char_1 -POSTHOOK: query: -- change column to smaller char -alter table ac.alter_char_1 change column value value char(3) +POSTHOOK: query: alter table ac.alter_char_1 change column value value char(3) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: ac@alter_char_1 POSTHOOK: Output: ac@alter_char_1 -PREHOOK: query: -- value column should be truncated now -select * from ac.alter_char_1 +PREHOOK: query: select * from ac.alter_char_1 PREHOOK: type: QUERY PREHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### -POSTHOOK: query: -- value column should be truncated now -select * from ac.alter_char_1 +POSTHOOK: query: select * from ac.alter_char_1 POSTHOOK: 
type: QUERY POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### @@ -91,23 +79,19 @@ POSTHOOK: Input: ac@alter_char_1 0 val 10 val 100 val -PREHOOK: query: -- change back to bigger char -alter table ac.alter_char_1 change column value value char(20) +PREHOOK: query: alter table ac.alter_char_1 change column value value char(20) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: ac@alter_char_1 PREHOOK: Output: ac@alter_char_1 -POSTHOOK: query: -- change back to bigger char -alter table ac.alter_char_1 change column value value char(20) +POSTHOOK: query: alter table ac.alter_char_1 change column value value char(20) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: ac@alter_char_1 POSTHOOK: Output: ac@alter_char_1 -PREHOOK: query: -- column values should be full size again -select * from ac.alter_char_1 +PREHOOK: query: select * from ac.alter_char_1 PREHOOK: type: QUERY PREHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### -POSTHOOK: query: -- column values should be full size again -select * from ac.alter_char_1 +POSTHOOK: query: select * from ac.alter_char_1 POSTHOOK: type: QUERY POSTHOOK: Input: ac@alter_char_1 #### A masked pattern was here #### @@ -116,13 +100,11 @@ POSTHOOK: Input: ac@alter_char_1 0 val_0 10 val_10 100 val_100 -PREHOOK: query: -- add char column -alter table ac.alter_char_1 add columns (key2 int, value2 char(10)) +PREHOOK: query: alter table ac.alter_char_1 add columns (key2 int, value2 char(10)) PREHOOK: type: ALTERTABLE_ADDCOLS PREHOOK: Input: ac@alter_char_1 PREHOOK: Output: ac@alter_char_1 -POSTHOOK: query: -- add char column -alter table ac.alter_char_1 add columns (key2 int, value2 char(10)) +POSTHOOK: query: alter table ac.alter_char_1 add columns (key2 int, value2 char(10)) POSTHOOK: type: ALTERTABLE_ADDCOLS POSTHOOK: Input: ac@alter_char_1 POSTHOOK: Output: ac@alter_char_1 diff --git a/ql/src/test/results/clientpositive/alter_char2.q.out b/ql/src/test/results/clientpositive/alter_char2.q.out index 
7ea86ca..2084477 100644 --- a/ql/src/test/results/clientpositive/alter_char2.q.out +++ b/ql/src/test/results/clientpositive/alter_char2.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- alter column type, with partitioned table -drop table if exists alter_char2 +PREHOOK: query: drop table if exists alter_char2 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- alter column type, with partitioned table -drop table if exists alter_char2 +POSTHOOK: query: drop table if exists alter_char2 POSTHOOK: type: DROPTABLE PREHOOK: query: create table alter_char2 ( c1 char(255) diff --git a/ql/src/test/results/clientpositive/alter_file_format.q.out b/ql/src/test/results/clientpositive/alter_file_format.q.out index 14dd892..a69b423 100644 --- a/ql/src/test/results/clientpositive/alter_file_format.q.out +++ b/ql/src/test/results/clientpositive/alter_file_format.q.out @@ -303,13 +303,11 @@ PREHOOK: query: drop table alter_partition_format_test PREHOOK: type: DROPTABLE POSTHOOK: query: drop table alter_partition_format_test POSTHOOK: type: DROPTABLE -PREHOOK: query: --partitioned table -create table alter_partition_format_test (key int, value string) partitioned by (ds string) +PREHOOK: query: create table alter_partition_format_test (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alter_partition_format_test -POSTHOOK: query: --partitioned table -create table alter_partition_format_test (key int, value string) partitioned by (ds string) +POSTHOOK: query: create table alter_partition_format_test (key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@alter_partition_format_test diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out index 3b71598..53ed194 100644 --- 
a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata --- the partition metadata is updated as well. - -CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Tests that when overwriting a partition in a table after altering the bucketing/sorting metadata --- the partition metadata is updated as well. - -CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tst1 @@ -96,13 +90,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test an unbucketed partition gets converted to bucketed -ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS +PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test an unbucketed partition gets converted to bucketed -ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS +POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (key) INTO 8 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -190,13 +182,11 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test an unsorted partition gets converted to sorted -ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS +PREHOOK: query: ALTER TABLE 
tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test an unsorted partition gets converted to sorted -ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS +POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (key) SORTED BY (key DESC) INTO 8 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -284,13 +274,11 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test changing the bucket columns -ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS +PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test changing the bucket columns -ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS +POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 8 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -378,13 +366,11 @@ Bucket Columns: [value] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test changing the number of buckets -ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS +PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test changing the number of buckets -ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS +POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (key DESC) INTO 4 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: 
default@tst1 POSTHOOK: Output: default@tst1 @@ -472,13 +458,11 @@ Bucket Columns: [value] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test changing the sort columns -ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS +PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test changing the sort columns -ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS +POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value DESC) INTO 4 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -566,13 +550,11 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test changing the sort order -ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS +PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test changing the sort order -ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS +POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) SORTED BY (value ASC) INTO 4 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -660,13 +642,11 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test a sorted partition gets converted to unsorted -ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS +PREHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: 
default@tst1 -POSTHOOK: query: -- Test a sorted partition gets converted to unsorted -ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS +POSTHOOK: query: ALTER TABLE tst1 CLUSTERED BY (value) INTO 4 BUCKETS POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -754,13 +734,11 @@ Bucket Columns: [value] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test a bucketed partition gets converted to unbucketed -ALTER TABLE tst1 NOT CLUSTERED +PREHOOK: query: ALTER TABLE tst1 NOT CLUSTERED PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test a bucketed partition gets converted to unbucketed -ALTER TABLE tst1 NOT CLUSTERED +POSTHOOK: query: ALTER TABLE tst1 NOT CLUSTERED POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out index cab3de4..578bad7 100644 --- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) -create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets +PREHOOK: query: create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tst1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) -create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets +POSTHOOK: query: create table tst1(key string, value string) partitioned by (ds string) clustered by (key) into 10 buckets POSTHOOK: 
type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tst1 @@ -100,15 +98,11 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test changing bucket number - -alter table tst1 clustered by (key) into 12 buckets +PREHOOK: query: alter table tst1 clustered by (key) into 12 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test changing bucket number - -alter table tst1 clustered by (key) into 12 buckets +POSTHOOK: query: alter table tst1 clustered by (key) into 12 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -196,14 +190,10 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test changing bucket number of (table/partition) - -alter table tst1 into 4 buckets +PREHOOK: query: alter table tst1 into 4 buckets PREHOOK: type: ALTERTABLE_BUCKETNUM PREHOOK: Input: default@tst1 -POSTHOOK: query: -- Test changing bucket number of (table/partition) - -alter table tst1 into 4 buckets +POSTHOOK: query: alter table tst1 into 4 buckets POSTHOOK: type: ALTERTABLE_BUCKETNUM POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -364,15 +354,11 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test adding sort order - -alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets +PREHOOK: query: alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test adding sort order - -alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets +POSTHOOK: query: alter table tst1 clustered by (key) sorted by (key asc) into 12 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 
POSTHOOK: Output: default@tst1 @@ -411,15 +397,11 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test changing sort order - -alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets +PREHOOK: query: alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test changing sort order - -alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets +POSTHOOK: query: alter table tst1 clustered by (key) sorted by (value desc) into 12 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -458,15 +440,11 @@ Bucket Columns: [key] Sort Columns: [Order(col:value, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test removing test order - -alter table tst1 clustered by (value) into 12 buckets +PREHOOK: query: alter table tst1 clustered by (value) into 12 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test removing test order - -alter table tst1 clustered by (value) into 12 buckets +POSTHOOK: query: alter table tst1 clustered by (value) into 12 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 @@ -505,15 +483,11 @@ Bucket Columns: [value] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test removing buckets - -alter table tst1 not clustered +PREHOOK: query: alter table tst1 not clustered PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test removing buckets - -alter table tst1 not clustered +POSTHOOK: query: alter table tst1 not clustered POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@tst1 
POSTHOOK: Output: default@tst1 diff --git a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out index fff987c..09dfd1c 100644 --- a/ql/src/test/results/clientpositive/alter_partition_change_col.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_change_col.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table alter_partition_change_col0 (c1 string, c2 string) +PREHOOK: query: create table alter_partition_change_col0 (c1 string, c2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alter_partition_change_col0 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table alter_partition_change_col0 (c1 string, c2 string) +POSTHOOK: query: create table alter_partition_change_col0 (c1 string, c2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@alter_partition_change_col0 @@ -93,13 +89,11 @@ Snow 55.71 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.25 __HIVE_DEFAULT_PARTITION__ 123 Tom 19.00 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.79 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- Change c2 to decimal(10,0) -alter table alter_partition_change_col1 change c2 c2 decimal(10,0) +PREHOOK: query: alter table alter_partition_change_col1 change c2 c2 decimal(10,0) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_partition_change_col1 PREHOOK: Output: default@alter_partition_change_col1 -POSTHOOK: query: -- Change c2 to decimal(10,0) -alter table alter_partition_change_col1 change c2 c2 decimal(10,0) +POSTHOOK: query: alter table alter_partition_change_col1 change c2 c2 decimal(10,0) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Output: default@alter_partition_change_col1 @@ -161,13 +155,11 @@ Snow 56 __HIVE_DEFAULT_PARTITION__ 123 Tom -12 __HIVE_DEFAULT_PARTITION__ 123 Tom 19 __HIVE_DEFAULT_PARTITION__ 123 Tom 
235 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- Change the column type at the table level. Table-level describe shows the new type, but the existing partition does not. -alter table alter_partition_change_col1 change c2 c2 decimal(14,4) +PREHOOK: query: alter table alter_partition_change_col1 change c2 c2 decimal(14,4) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_partition_change_col1 PREHOOK: Output: default@alter_partition_change_col1 -POSTHOOK: query: -- Change the column type at the table level. Table-level describe shows the new type, but the existing partition does not. -alter table alter_partition_change_col1 change c2 c2 decimal(14,4) +POSTHOOK: query: alter table alter_partition_change_col1 change c2 c2 decimal(14,4) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Output: default@alter_partition_change_col1 @@ -243,12 +235,10 @@ Snow 56.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 19.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 235.0000 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- change the comment on a partition column without changing type or renaming it -alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1') +PREHOOK: query: alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1') PREHOOK: type: ALTERTABLE_PARTCOLTYPE PREHOOK: Input: default@alter_partition_change_col1 -POSTHOOK: query: -- change the comment on a partition column without changing type or renaming it -alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1') +POSTHOOK: query: alter table alter_partition_change_col1 partition column (p1 string comment 'Changed comment for p1') POSTHOOK: type: ALTERTABLE_PARTCOLTYPE POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Output: default@alter_partition_change_col1 @@ -268,13 
+258,11 @@ p2 string Column p2 p1 string Changed comment for p1 p2 string Column p2 -PREHOOK: query: -- now change the column type of the existing partition -alter table alter_partition_change_col1 partition (p1='abc', p2='123') change c2 c2 decimal(14,4) +PREHOOK: query: alter table alter_partition_change_col1 partition (p1='abc', p2='123') change c2 c2 decimal(14,4) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_partition_change_col1 PREHOOK: Output: default@alter_partition_change_col1@p1=abc/p2=123 -POSTHOOK: query: -- now change the column type of the existing partition -alter table alter_partition_change_col1 partition (p1='abc', p2='123') change c2 c2 decimal(14,4) +POSTHOOK: query: alter table alter_partition_change_col1 partition (p1='abc', p2='123') change c2 c2 decimal(14,4) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123 @@ -335,13 +323,11 @@ Snow 56.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 19.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 235.0000 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- change column for default partition value -alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') change c2 c2 decimal(14,4) +PREHOOK: query: alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') change c2 c2 decimal(14,4) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_partition_change_col1 PREHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123 -POSTHOOK: query: -- change column for default partition value -alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') change c2 c2 decimal(14,4) +POSTHOOK: query: alter table alter_partition_change_col1 partition (p1='__HIVE_DEFAULT_PARTITION__', p2='123') change c2 c2 decimal(14,4) 
POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123 @@ -402,13 +388,11 @@ Snow 55.7100 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.2500 __HIVE_DEFAULT_PARTITION__ 123 Tom 19.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.7900 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- Try out replace columns -alter table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string) +PREHOOK: query: alter table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@alter_partition_change_col1 PREHOOK: Output: default@alter_partition_change_col1@p1=abc/p2=123 -POSTHOOK: query: -- Try out replace columns -alter table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string) +POSTHOOK: query: alter table alter_partition_change_col1 partition (p1='abc', p2='123') replace columns (c1 string) POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=abc/p2=123 @@ -547,13 +531,11 @@ Snow __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- Try add columns -alter table alter_partition_change_col1 add columns (c2 decimal(14,4)) +PREHOOK: query: alter table alter_partition_change_col1 add columns (c2 decimal(14,4)) PREHOOK: type: ALTERTABLE_ADDCOLS PREHOOK: Input: default@alter_partition_change_col1 PREHOOK: Output: default@alter_partition_change_col1 -POSTHOOK: query: -- Try add columns -alter table alter_partition_change_col1 add columns (c2 decimal(14,4)) +POSTHOOK: query: alter table alter_partition_change_col1 add columns (c2 decimal(14,4)) POSTHOOK: type: ALTERTABLE_ADDCOLS POSTHOOK: Input: default@alter_partition_change_col1 
POSTHOOK: Output: default@alter_partition_change_col1 @@ -693,14 +675,12 @@ Snow 55.7100 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.2500 __HIVE_DEFAULT_PARTITION__ 123 Tom 19.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.7900 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- Try changing column for all partitions at once -alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0) +PREHOOK: query: alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_partition_change_col1 PREHOOK: Output: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123 PREHOOK: Output: default@alter_partition_change_col1@p1=abc/p2=123 -POSTHOOK: query: -- Try changing column for all partitions at once -alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0) +POSTHOOK: query: alter table alter_partition_change_col1 partition (p1, p2='123') change column c2 c2 decimal(10,0) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_partition_change_col1 POSTHOOK: Input: default@alter_partition_change_col1@p1=__HIVE_DEFAULT_PARTITION__/p2=123 diff --git a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out index 3234792..29fbe6f 100644 --- a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out @@ -13,15 +13,11 @@ POSTHOOK: query: alter table alter_table_partition_clusterby_sortby add partitio POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: default@alter_table_partition_clusterby_sortby POSTHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc -PREHOOK: query: -- Turn off sorting for a partition - -alter table alter_table_partition_clusterby_sortby partition(c='abc') not 
sorted +PREHOOK: query: alter table alter_table_partition_clusterby_sortby partition(c='abc') not sorted PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@alter_table_partition_clusterby_sortby PREHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc -POSTHOOK: query: -- Turn off sorting for a partition - -alter table alter_table_partition_clusterby_sortby partition(c='abc') not sorted +POSTHOOK: query: alter table alter_table_partition_clusterby_sortby partition(c='abc') not sorted POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@alter_table_partition_clusterby_sortby POSTHOOK: Input: default@alter_table_partition_clusterby_sortby@c=abc @@ -66,15 +62,11 @@ Bucket Columns: [a, b] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Modify clustering for a partition - -alter table alter_table_partition_clusterby_sortby partition(c='abc') clustered by (b) sorted by (b desc) into 4 buckets +PREHOOK: query: alter table alter_table_partition_clusterby_sortby partition(c='abc') clustered by (b) sorted by (b desc) into 4 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@alter_table_partition_clusterby_sortby PREHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc -POSTHOOK: query: -- Modify clustering for a partition - -alter table alter_table_partition_clusterby_sortby partition(c='abc') clustered by (b) sorted by (b desc) into 4 buckets +POSTHOOK: query: alter table alter_table_partition_clusterby_sortby partition(c='abc') clustered by (b) sorted by (b desc) into 4 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@alter_table_partition_clusterby_sortby POSTHOOK: Input: default@alter_table_partition_clusterby_sortby@c=abc @@ -119,15 +111,11 @@ Bucket Columns: [b] Sort Columns: [Order(col:b, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Turn off clustering for a partition - -alter table 
alter_table_partition_clusterby_sortby partition(c='abc') not clustered +PREHOOK: query: alter table alter_table_partition_clusterby_sortby partition(c='abc') not clustered PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@alter_table_partition_clusterby_sortby PREHOOK: Output: default@alter_table_partition_clusterby_sortby@c=abc -POSTHOOK: query: -- Turn off clustering for a partition - -alter table alter_table_partition_clusterby_sortby partition(c='abc') not clustered +POSTHOOK: query: alter table alter_table_partition_clusterby_sortby partition(c='abc') not clustered POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@alter_table_partition_clusterby_sortby POSTHOOK: Input: default@alter_table_partition_clusterby_sortby@c=abc @@ -172,14 +160,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Table properties should be unchanged - -desc formatted alter_table_partition_clusterby_sortby +PREHOOK: query: desc formatted alter_table_partition_clusterby_sortby PREHOOK: type: DESCTABLE PREHOOK: Input: default@alter_table_partition_clusterby_sortby -POSTHOOK: query: -- Table properties should be unchanged - -desc formatted alter_table_partition_clusterby_sortby +POSTHOOK: query: desc formatted alter_table_partition_clusterby_sortby POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@alter_table_partition_clusterby_sortby # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out index 703a8e3..0ebbdc0 100644 --- a/ql/src/test/results/clientpositive/alter_partition_coltype.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_coltype.q.out @@ -1,20 +1,16 @@ -PREHOOK: query: -- create testing table. 
-create table alter_coltype(key string, value string) partitioned by (dt string, ts string) +PREHOOK: query: create table alter_coltype(key string, value string) partitioned by (dt string, ts string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alter_coltype -POSTHOOK: query: -- create testing table. -create table alter_coltype(key string, value string) partitioned by (dt string, ts string) +POSTHOOK: query: create table alter_coltype(key string, value string) partitioned by (dt string, ts string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@alter_coltype -PREHOOK: query: -- insert and create a partition. -insert overwrite table alter_coltype partition(dt='100', ts='6.30') select * from src1 +PREHOOK: query: insert overwrite table alter_coltype partition(dt='100', ts='6.30') select * from src1 PREHOOK: type: QUERY PREHOOK: Input: default@src1 PREHOOK: Output: default@alter_coltype@dt=100/ts=6.30 -POSTHOOK: query: -- insert and create a partition. -insert overwrite table alter_coltype partition(dt='100', ts='6.30') select * from src1 +POSTHOOK: query: insert overwrite table alter_coltype partition(dt='100', ts='6.30') select * from src1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 POSTHOOK: Output: default@alter_coltype@dt=100/ts=6.30 @@ -36,45 +32,37 @@ ts string dt string ts string -PREHOOK: query: -- select with paritition predicate. -select count(*) from alter_coltype where dt = '100' +PREHOOK: query: select count(*) from alter_coltype where dt = '100' PREHOOK: type: QUERY PREHOOK: Input: default@alter_coltype #### A masked pattern was here #### -POSTHOOK: query: -- select with paritition predicate. 
-select count(*) from alter_coltype where dt = '100' +POSTHOOK: query: select count(*) from alter_coltype where dt = '100' POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_coltype #### A masked pattern was here #### 25 -PREHOOK: query: -- alter partition key column data type for dt column. -alter table alter_coltype partition column (dt int) +PREHOOK: query: alter table alter_coltype partition column (dt int) PREHOOK: type: ALTERTABLE_PARTCOLTYPE PREHOOK: Input: default@alter_coltype -POSTHOOK: query: -- alter partition key column data type for dt column. -alter table alter_coltype partition column (dt int) +POSTHOOK: query: alter table alter_coltype partition column (dt int) POSTHOOK: type: ALTERTABLE_PARTCOLTYPE POSTHOOK: Input: default@alter_coltype POSTHOOK: Output: default@alter_coltype -PREHOOK: query: -- load a new partition using new data type. -insert overwrite table alter_coltype partition(dt=100, ts='3.0') select * from src1 +PREHOOK: query: insert overwrite table alter_coltype partition(dt=100, ts='3.0') select * from src1 PREHOOK: type: QUERY PREHOOK: Input: default@src1 PREHOOK: Output: default@alter_coltype@dt=100/ts=3.0 -POSTHOOK: query: -- load a new partition using new data type. -insert overwrite table alter_coltype partition(dt=100, ts='3.0') select * from src1 +POSTHOOK: query: insert overwrite table alter_coltype partition(dt=100, ts='3.0') select * from src1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 POSTHOOK: Output: default@alter_coltype@dt=100/ts=3.0 POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=3.0).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=3.0).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- make sure the partition predicate still works. 
-select count(*) from alter_coltype where dt = '100' +PREHOOK: query: select count(*) from alter_coltype where dt = '100' PREHOOK: type: QUERY PREHOOK: Input: default@alter_coltype #### A masked pattern was here #### -POSTHOOK: query: -- make sure the partition predicate still works. -select count(*) from alter_coltype where dt = '100' +POSTHOOK: query: select count(*) from alter_coltype where dt = '100' POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_coltype #### A masked pattern was here #### @@ -93,12 +81,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- alter partition key column data type for ts column. -alter table alter_coltype partition column (ts double) +PREHOOK: query: alter table alter_coltype partition column (ts double) PREHOOK: type: ALTERTABLE_PARTCOLTYPE PREHOOK: Input: default@alter_coltype -POSTHOOK: query: -- alter partition key column data type for ts column. -alter table alter_coltype partition column (ts double) +POSTHOOK: query: alter table alter_coltype partition column (ts double) POSTHOOK: type: ALTERTABLE_PARTCOLTYPE POSTHOOK: Input: default@alter_coltype POSTHOOK: Output: default@alter_coltype @@ -109,25 +95,21 @@ POSTHOOK: query: alter table alter_coltype partition column (dt string) POSTHOOK: type: ALTERTABLE_PARTCOLTYPE POSTHOOK: Input: default@alter_coltype POSTHOOK: Output: default@alter_coltype -PREHOOK: query: -- load a new partition using new data type. -insert overwrite table alter_coltype partition(dt='100', ts=3.0) select * from src1 +PREHOOK: query: insert overwrite table alter_coltype partition(dt='100', ts=3.0) select * from src1 PREHOOK: type: QUERY PREHOOK: Input: default@src1 PREHOOK: Output: default@alter_coltype@dt=100/ts=3.0 -POSTHOOK: query: -- load a new partition using new data type. 
-insert overwrite table alter_coltype partition(dt='100', ts=3.0) select * from src1 +POSTHOOK: query: insert overwrite table alter_coltype partition(dt='100', ts=3.0) select * from src1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 POSTHOOK: Output: default@alter_coltype@dt=100/ts=3.0 POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=3.0).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: alter_coltype PARTITION(dt=100,ts=3.0).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- validate partition key column predicate can still work. -select count(*) from alter_coltype where ts = '6.30' +PREHOOK: query: select count(*) from alter_coltype where ts = '6.30' PREHOOK: type: QUERY PREHOOK: Input: default@alter_coltype #### A masked pattern was here #### -POSTHOOK: query: -- validate partition key column predicate can still work. -select count(*) from alter_coltype where ts = '6.30' +POSTHOOK: query: select count(*) from alter_coltype where ts = '6.30' POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_coltype #### A masked pattern was here #### @@ -146,15 +128,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- validate partition key column predicate on two different partition column data type --- can still work. -select count(*) from alter_coltype where ts = 3.0 and dt=100 +PREHOOK: query: select count(*) from alter_coltype where ts = 3.0 and dt=100 PREHOOK: type: QUERY PREHOOK: Input: default@alter_coltype #### A masked pattern was here #### -POSTHOOK: query: -- validate partition key column predicate on two different partition column data type --- can still work. 
-select count(*) from alter_coltype where ts = 3.0 and dt=100 +POSTHOOK: query: select count(*) from alter_coltype where ts = 3.0 and dt=100 POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_coltype #### A masked pattern was here #### @@ -173,15 +151,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- query where multiple partition values (of different datatypes) are being selected -select key, value, dt, ts from alter_coltype where dt is not null +PREHOOK: query: select key, value, dt, ts from alter_coltype where dt is not null PREHOOK: type: QUERY PREHOOK: Input: default@alter_coltype PREHOOK: Input: default@alter_coltype@dt=100/ts=3.0 PREHOOK: Input: default@alter_coltype@dt=100/ts=6.30 #### A masked pattern was here #### -POSTHOOK: query: -- query where multiple partition values (of different datatypes) are being selected -select key, value, dt, ts from alter_coltype where dt is not null +POSTHOOK: query: select key, value, dt, ts from alter_coltype where dt is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_coltype POSTHOOK: Input: default@alter_coltype@dt=100/ts=3.0 @@ -357,13 +333,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_coltype #### A masked pattern was here #### 25 -PREHOOK: query: -- make sure the partition predicate still works. -select count(*) from alter_coltype where dt = '100' +PREHOOK: query: select count(*) from alter_coltype where dt = '100' PREHOOK: type: QUERY PREHOOK: Input: default@alter_coltype #### A masked pattern was here #### -POSTHOOK: query: -- make sure the partition predicate still works. 
-select count(*) from alter_coltype where dt = '100' +POSTHOOK: query: select count(*) from alter_coltype where dt = '100' POSTHOOK: type: QUERY POSTHOOK: Input: default@alter_coltype #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/alter_partition_format_loc.q.out b/ql/src/test/results/clientpositive/alter_partition_format_loc.q.out index 3152bda..ea2a70c 100644 --- a/ql/src/test/results/clientpositive/alter_partition_format_loc.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_format_loc.q.out @@ -60,13 +60,11 @@ POSTHOOK: query: drop table alter_partition_format_test POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@alter_partition_format_test POSTHOOK: Output: default@alter_partition_format_test -PREHOOK: query: --partitioned table -create table alter_partition_format_test (key int, value string) partitioned by (ds string) +PREHOOK: query: create table alter_partition_format_test (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alter_partition_format_test -POSTHOOK: query: --partitioned table -create table alter_partition_format_test (key int, value string) partitioned by (ds string) +POSTHOOK: query: create table alter_partition_format_test (key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@alter_partition_format_test diff --git a/ql/src/test/results/clientpositive/alter_partition_with_whitelist.q.out b/ql/src/test/results/clientpositive/alter_partition_with_whitelist.q.out index 040ec45..ff79150 100644 --- a/ql/src/test/results/clientpositive/alter_partition_with_whitelist.q.out +++ b/ql/src/test/results/clientpositive/alter_partition_with_whitelist.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- This pattern matches only letters. 
- -CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_whitelist_test -POSTHOOK: query: -- This pattern matches only letters. - -CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE part_whitelist_test (key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@part_whitelist_test diff --git a/ql/src/test/results/clientpositive/alter_rename_partition.q.out b/ql/src/test/results/clientpositive/alter_rename_partition.q.out index 9a119a9..5702d39 100644 --- a/ql/src/test/results/clientpositive/alter_rename_partition.q.out +++ b/ql/src/test/results/clientpositive/alter_rename_partition.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- Cleanup -DROP TABLE alter_rename_partition_src +PREHOOK: query: DROP TABLE alter_rename_partition_src PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Cleanup -DROP TABLE alter_rename_partition_src +POSTHOOK: query: DROP TABLE alter_rename_partition_src POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE alter_rename_partition PREHOOK: type: DROPTABLE @@ -119,13 +117,11 @@ POSTHOOK: Input: default@alter_rename_partition@pcol1=new_part1%3A/pcol2=new_par 4 new_part1: new_part2: 5 new_part1: new_part2: 6 new_part1: new_part2: -PREHOOK: query: -- Cleanup -DROP TABLE alter_rename_partition_src +PREHOOK: query: DROP TABLE alter_rename_partition_src PREHOOK: type: DROPTABLE PREHOOK: Input: default@alter_rename_partition_src PREHOOK: Output: default@alter_rename_partition_src -POSTHOOK: query: -- Cleanup -DROP TABLE alter_rename_partition_src +POSTHOOK: query: DROP TABLE alter_rename_partition_src POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@alter_rename_partition_src POSTHOOK: 
Output: default@alter_rename_partition_src @@ -158,14 +154,10 @@ src_thrift srcbucket srcbucket2 srcpart -PREHOOK: query: -- With non-default Database - -CREATE DATABASE alter_rename_partition_db +PREHOOK: query: CREATE DATABASE alter_rename_partition_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:alter_rename_partition_db -POSTHOOK: query: -- With non-default Database - -CREATE DATABASE alter_rename_partition_db +POSTHOOK: query: CREATE DATABASE alter_rename_partition_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:alter_rename_partition_db PREHOOK: query: USE alter_rename_partition_db diff --git a/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out b/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out index 79489ad..b2e3277 100644 --- a/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out +++ b/ql/src/test/results/clientpositive/alter_rename_partition_authorization.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -create table src_auth_tmp as select * from src +PREHOOK: query: create table src_auth_tmp as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src_auth_tmp -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table src_auth_tmp as select * from src +POSTHOOK: query: create table src_auth_tmp as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -36,12 +32,10 @@ PREHOOK: Output: default@src_auth_tmp POSTHOOK: query: grant select on table src_auth_tmp to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_auth_tmp -PREHOOK: query: -- column grant to user -grant Create on table authorization_part to user hive_test_user +PREHOOK: query: grant Create on table authorization_part to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: 
default@authorization_part -POSTHOOK: query: -- column grant to user -grant Create on table authorization_part to user hive_test_user +POSTHOOK: query: grant Create on table authorization_part to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@authorization_part PREHOOK: query: grant Update on table authorization_part to user hive_test_user diff --git a/ql/src/test/results/clientpositive/alter_table_cascade.q.out b/ql/src/test/results/clientpositive/alter_table_cascade.q.out index 5b9c9ee..13e6b24 100644 --- a/ql/src/test/results/clientpositive/alter_table_cascade.q.out +++ b/ql/src/test/results/clientpositive/alter_table_cascade.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table if exists alter_table_src +PREHOOK: query: drop table if exists alter_table_src PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table if exists alter_table_src +POSTHOOK: query: drop table if exists alter_table_src POSTHOOK: type: DROPTABLE PREHOOK: query: drop table if exists alter_table_cascade PREHOOK: type: DROPTABLE @@ -144,21 +140,13 @@ Snow __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- add columns c2 by replace columns (for HIVE-6131) --- reload data to existing partition __HIVE_DEFAULT_PARTITION__ --- load data to a new partition xyz --- querying data (form new or existing partition) should return non-NULL values for the new column -alter table alter_table_cascade replace columns (c1 string, c2 string) cascade +PREHOOK: query: alter table alter_table_cascade replace columns (c1 string, c2 string) cascade PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 -POSTHOOK: query: -- add 
columns c2 by replace columns (for HIVE-6131) --- reload data to existing partition __HIVE_DEFAULT_PARTITION__ --- load data to a new partition xyz --- querying data (form new or existing partition) should return non-NULL values for the new column -alter table alter_table_cascade replace columns (c1 string, c2 string) cascade +POSTHOOK: query: alter table alter_table_cascade replace columns (c1 string, c2 string) cascade POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@alter_table_cascade POSTHOOK: Output: default@alter_table_cascade @@ -305,18 +293,14 @@ Snow 55.71 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.25 __HIVE_DEFAULT_PARTITION__ 123 Tom 19.00 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.79 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- Change c2 to decimal(10,0), the change should cascaded to all partitions --- the c2 value returned should be in decimal(10,0) -alter table alter_table_cascade change c2 c2 decimal(10,0) comment "change datatype" cascade +PREHOOK: query: alter table alter_table_cascade change c2 c2 decimal(10,0) comment "change datatype" cascade PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123 -POSTHOOK: query: -- Change c2 to decimal(10,0), the change should cascaded to all partitions --- the c2 value returned should be in decimal(10,0) -alter table alter_table_cascade change c2 c2 decimal(10,0) comment "change datatype" cascade +POSTHOOK: query: alter table alter_table_cascade change c2 c2 decimal(10,0) comment "change datatype" cascade POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_table_cascade POSTHOOK: Output: default@alter_table_cascade @@ -447,16 +431,14 @@ Snow 56 __HIVE_DEFAULT_PARTITION__ 123 Tom -12 __HIVE_DEFAULT_PARTITION__ 123 
Tom 19 __HIVE_DEFAULT_PARTITION__ 123 Tom 235 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- rename c1 to c2fromc1 and move it to after c2, the change should cascaded to all partitions -alter table alter_table_cascade change c1 c2fromc1 string comment "change position after" after c2 cascade +PREHOOK: query: alter table alter_table_cascade change c1 c2fromc1 string comment "change position after" after c2 cascade PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123 -POSTHOOK: query: -- rename c1 to c2fromc1 and move it to after c2, the change should cascaded to all partitions -alter table alter_table_cascade change c1 c2fromc1 string comment "change position after" after c2 cascade +POSTHOOK: query: alter table alter_table_cascade change c1 c2fromc1 string comment "change position after" after c2 cascade POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_table_cascade POSTHOOK: Output: default@alter_table_cascade @@ -527,16 +509,14 @@ p2 string p1 string p2 string -PREHOOK: query: -- rename c2fromc1 back to c1 and move to first as c1, the change should cascaded to all partitions -alter table alter_table_cascade change c2fromc1 c1 string comment "change position first" first cascade +PREHOOK: query: alter table alter_table_cascade change c2fromc1 c1 string comment "change position first" first cascade PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123 -POSTHOOK: query: -- rename c2fromc1 back to c1 and move to 
first as c1, the change should cascaded to all partitions -alter table alter_table_cascade change c2fromc1 c1 string comment "change position first" first cascade +POSTHOOK: query: alter table alter_table_cascade change c2fromc1 c1 string comment "change position first" first cascade POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_table_cascade POSTHOOK: Output: default@alter_table_cascade @@ -607,16 +587,14 @@ p2 string p1 string p2 string -PREHOOK: query: -- Try out replace columns, the change should cascaded to all partitions -alter table alter_table_cascade replace columns (c1 string) cascade +PREHOOK: query: alter table alter_table_cascade replace columns (c1 string) cascade PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123 -POSTHOOK: query: -- Try out replace columns, the change should cascaded to all partitions -alter table alter_table_cascade replace columns (c1 string) cascade +POSTHOOK: query: alter table alter_table_cascade replace columns (c1 string) cascade POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@alter_table_cascade POSTHOOK: Output: default@alter_table_cascade @@ -743,16 +721,14 @@ Snow __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- Try add columns, the change should cascaded to all partitions -alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade +PREHOOK: query: alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade PREHOOK: type: ALTERTABLE_ADDCOLS PREHOOK: Input: default@alter_table_cascade PREHOOK: Output: default@alter_table_cascade PREHOOK: Output: 
default@alter_table_cascade@p1=__HIVE_DEFAULT_PARTITION__/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=abc/p2=123 PREHOOK: Output: default@alter_table_cascade@p1=xyz/p2=123 -POSTHOOK: query: -- Try add columns, the change should cascaded to all partitions -alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade +POSTHOOK: query: alter table alter_table_cascade add columns (c2 decimal(14,4)) cascade POSTHOOK: type: ALTERTABLE_ADDCOLS POSTHOOK: Input: default@alter_table_cascade POSTHOOK: Output: default@alter_table_cascade @@ -883,13 +859,9 @@ Snow 55.7100 __HIVE_DEFAULT_PARTITION__ 123 Tom -12.2500 __HIVE_DEFAULT_PARTITION__ 123 Tom 19.0000 __HIVE_DEFAULT_PARTITION__ 123 Tom 234.7900 __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- - -drop table if exists alter_table_restrict +PREHOOK: query: drop table if exists alter_table_restrict PREHOOK: type: DROPTABLE -POSTHOOK: query: -- - -drop table if exists alter_table_restrict +POSTHOOK: query: drop table if exists alter_table_restrict POSTHOOK: type: DROPTABLE PREHOOK: query: create table alter_table_restrict (c1 string) partitioned by (p1 string, p2 string) PREHOOK: type: CREATETABLE @@ -1009,17 +981,11 @@ Snow __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 Tom __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- add columns c2 by replace columns (for HIVE-6131) without cascade --- only table column definition has changed, partitions do not --- after replace, only new partition xyz return the value to new added columns but not existing partitions abc and __HIVE_DEFAULT_PARTITION__ -alter table alter_table_restrict replace columns (c1 string, c2 string) restrict +PREHOOK: query: alter table alter_table_restrict replace columns (c1 string, c2 string) restrict PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@alter_table_restrict PREHOOK: Output: default@alter_table_restrict -POSTHOOK: query: -- add columns c2 by replace columns 
(for HIVE-6131) without cascade --- only table column definition has changed, partitions do not --- after replace, only new partition xyz return the value to new added columns but not existing partitions abc and __HIVE_DEFAULT_PARTITION__ -alter table alter_table_restrict replace columns (c1 string, c2 string) restrict +POSTHOOK: query: alter table alter_table_restrict replace columns (c1 string, c2 string) restrict POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@alter_table_restrict POSTHOOK: Output: default@alter_table_restrict @@ -1170,13 +1136,11 @@ Snow NULL __HIVE_DEFAULT_PARTITION__ 123 Tom NULL __HIVE_DEFAULT_PARTITION__ 123 Tom NULL __HIVE_DEFAULT_PARTITION__ 123 Tom NULL __HIVE_DEFAULT_PARTITION__ 123 -PREHOOK: query: -- Change c2 to decimal(10,0), only limited to table and new partition -alter table alter_table_restrict change c2 c2 decimal(10,0) restrict +PREHOOK: query: alter table alter_table_restrict change c2 c2 decimal(10,0) restrict PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@alter_table_restrict PREHOOK: Output: default@alter_table_restrict -POSTHOOK: query: -- Change c2 to decimal(10,0), only limited to table and new partition -alter table alter_table_restrict change c2 c2 decimal(10,0) restrict +POSTHOOK: query: alter table alter_table_restrict change c2 c2 decimal(10,0) restrict POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@alter_table_restrict POSTHOOK: Output: default@alter_table_restrict @@ -1242,13 +1206,11 @@ p2 string p1 string p2 string -PREHOOK: query: -- Try out replace columns, only limited to table and new partition -alter table alter_table_restrict replace columns (c1 string) +PREHOOK: query: alter table alter_table_restrict replace columns (c1 string) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@alter_table_restrict PREHOOK: Output: default@alter_table_restrict -POSTHOOK: query: -- Try out replace columns, only limited to table and new partition -alter table 
alter_table_restrict replace columns (c1 string) +POSTHOOK: query: alter table alter_table_restrict replace columns (c1 string) POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@alter_table_restrict POSTHOOK: Output: default@alter_table_restrict @@ -1313,13 +1275,11 @@ p2 string p1 string p2 string -PREHOOK: query: -- Try add columns, only limited to table and new partition -alter table alter_table_restrict add columns (c2 decimal(14,4)) +PREHOOK: query: alter table alter_table_restrict add columns (c2 decimal(14,4)) PREHOOK: type: ALTERTABLE_ADDCOLS PREHOOK: Input: default@alter_table_restrict PREHOOK: Output: default@alter_table_restrict -POSTHOOK: query: -- Try add columns, only limited to table and new partition -alter table alter_table_restrict add columns (c2 decimal(14,4)) +POSTHOOK: query: alter table alter_table_restrict add columns (c2 decimal(14,4)) POSTHOOK: type: ALTERTABLE_ADDCOLS POSTHOOK: Input: default@alter_table_restrict POSTHOOK: Output: default@alter_table_restrict diff --git a/ql/src/test/results/clientpositive/alter_table_serde.q.out b/ql/src/test/results/clientpositive/alter_table_serde.q.out index 1c1707c..f414bde 100644 --- a/ql/src/test/results/clientpositive/alter_table_serde.q.out +++ b/ql/src/test/results/clientpositive/alter_table_serde.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- test table -create table test_table (id int, query string, name string) +PREHOOK: query: create table test_table (id int, query string, name string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- test table -create table test_table (id int, query string, name string) +POSTHOOK: query: create table test_table (id int, query string, name string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table @@ -65,13 +63,11 @@ POSTHOOK: query: drop table test_table POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@test_table POSTHOOK: 
Output: default@test_table -PREHOOK: query: --- test partitioned table -create table test_table (id int, query string, name string) partitioned by (dt string) +PREHOOK: query: create table test_table (id int, query string, name string) partitioned by (dt string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: --- test partitioned table -create table test_table (id int, query string, name string) partitioned by (dt string) +POSTHOOK: query: create table test_table (id int, query string, name string) partitioned by (dt string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table @@ -149,15 +145,11 @@ dt string dt string #### A masked pattern was here #### -PREHOOK: query: -- test partitions - -alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: query: alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' PREHOOK: type: ALTERPARTITION_SERIALIZER PREHOOK: Input: default@test_table PREHOOK: Output: default@test_table@dt=2011 -POSTHOOK: query: -- test partitions - -alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +POSTHOOK: query: alter table test_table partition(dt='2011') set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' POSTHOOK: type: ALTERPARTITION_SERIALIZER POSTHOOK: Input: default@test_table POSTHOOK: Input: default@test_table@dt=2011 diff --git a/ql/src/test/results/clientpositive/alter_table_serde2.q.out b/ql/src/test/results/clientpositive/alter_table_serde2.q.out index dd946e5..a1e1fb2 100644 --- a/ql/src/test/results/clientpositive/alter_table_serde2.q.out +++ b/ql/src/test/results/clientpositive/alter_table_serde2.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Tests that when overwriting a partition in a table after altering the serde properties --- the 
partition metadata is updated as well. - -CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Tests that when overwriting a partition in a table after altering the serde properties --- the partition metadata is updated as well. - -CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE tst1(key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tst1 @@ -96,15 +90,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test altering the serde properties - -ALTER TABLE tst1 SET SERDEPROPERTIES ('field.delim' = ',') +PREHOOK: query: ALTER TABLE tst1 SET SERDEPROPERTIES ('field.delim' = ',') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@tst1 PREHOOK: Output: default@tst1 -POSTHOOK: query: -- Test altering the serde properties - -ALTER TABLE tst1 SET SERDEPROPERTIES ('field.delim' = ',') +POSTHOOK: query: ALTER TABLE tst1 SET SERDEPROPERTIES ('field.delim' = ',') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: default@tst1 POSTHOOK: Output: default@tst1 diff --git a/ql/src/test/results/clientpositive/alter_table_update_status.q.out b/ql/src/test/results/clientpositive/alter_table_update_status.q.out index b2fefb7..a3c4f1a 100644 --- a/ql/src/test/results/clientpositive/alter_table_update_status.q.out +++ b/ql/src/test/results/clientpositive/alter_table_update_status.q.out @@ -350,12 +350,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment bin binary from deserializer -PREHOOK: query: --tinyint -DESC FORMATTED datatype_stats t +PREHOOK: query: 
DESC FORMATTED datatype_stats t PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --tinyint -DESC FORMATTED datatype_stats t +POSTHOOK: query: DESC FORMATTED datatype_stats t POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -374,12 +372,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment t tinyint 35 234 233 232 from deserializer -PREHOOK: query: --smallint -DESC FORMATTED datatype_stats s +PREHOOK: query: DESC FORMATTED datatype_stats s PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --smallint -DESC FORMATTED datatype_stats s +POSTHOOK: query: DESC FORMATTED datatype_stats s POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -398,12 +394,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment s smallint 25 489 56 56 from deserializer -PREHOOK: query: --int -DESC FORMATTED datatype_stats i +PREHOOK: query: DESC FORMATTED datatype_stats i PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --int -DESC FORMATTED datatype_stats i +POSTHOOK: query: DESC FORMATTED datatype_stats i POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -422,12 +416,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment i int 5 889 1 59 from deserializer -PREHOOK: query: --bigint -DESC FORMATTED datatype_stats b +PREHOOK: query: DESC 
FORMATTED datatype_stats b PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --bigint -DESC FORMATTED datatype_stats b +POSTHOOK: query: DESC FORMATTED datatype_stats b POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -446,12 +438,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment b bigint 8 89 14 9 from deserializer -PREHOOK: query: --float -DESC FORMATTED datatype_stats f +PREHOOK: query: DESC FORMATTED datatype_stats f PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --float -DESC FORMATTED datatype_stats f +POSTHOOK: query: DESC FORMATTED datatype_stats f POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -470,12 +460,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment f float 8.0 2345.656 45 563 from deserializer -PREHOOK: query: --double -DESC FORMATTED datatype_stats d +PREHOOK: query: DESC FORMATTED datatype_stats d PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --double -DESC FORMATTED datatype_stats d +POSTHOOK: query: DESC FORMATTED datatype_stats d POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -494,12 +482,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment d double 0.00455 560.3367 12 5677 from deserializer -PREHOOK: query: --decimal -DESC FORMATTED datatype_stats dem +PREHOOK: 
query: DESC FORMATTED datatype_stats dem PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --decimal -DESC FORMATTED datatype_stats dem +POSTHOOK: query: DESC FORMATTED datatype_stats dem POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -518,12 +504,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment dem decimal(10,0) 0 560 912 57 from deserializer -PREHOOK: query: --timestamp -DESC FORMATTED datatype_stats ts +PREHOOK: query: DESC FORMATTED datatype_stats ts PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --timestamp -DESC FORMATTED datatype_stats ts +POSTHOOK: query: DESC FORMATTED datatype_stats ts POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -542,12 +526,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment ts timestamp 1357030924 1357030923 12 7 from deserializer -PREHOOK: query: --decimal -DESC FORMATTED datatype_stats dt +PREHOOK: query: DESC FORMATTED datatype_stats dt PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --decimal -DESC FORMATTED datatype_stats dt +POSTHOOK: query: DESC FORMATTED datatype_stats dt POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -566,12 +548,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment dt date 2001-02-04 2012-01-01 912 57 from deserializer -PREHOOK: 
query: --string -DESC FORMATTED datatype_stats str +PREHOOK: query: DESC FORMATTED datatype_stats str PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --string -DESC FORMATTED datatype_stats str +POSTHOOK: query: DESC FORMATTED datatype_stats str POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -590,12 +570,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment str string 233 232 2.34 235 from deserializer -PREHOOK: query: --varchar -DESC FORMATTED datatype_stats v +PREHOOK: query: DESC FORMATTED datatype_stats v PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --varchar -DESC FORMATTED datatype_stats v +POSTHOOK: query: DESC FORMATTED datatype_stats v POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -614,12 +592,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment v varchar(12) 33 22 4.4 25 from deserializer -PREHOOK: query: --char -DESC FORMATTED datatype_stats c +PREHOOK: query: DESC FORMATTED datatype_stats c PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --char -DESC FORMATTED datatype_stats c +POSTHOOK: query: DESC FORMATTED datatype_stats c POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -638,12 +614,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment c char(5) 3 2 9.0 58 from deserializer 
-PREHOOK: query: --boolean -DESC FORMATTED datatype_stats bl +PREHOOK: query: DESC FORMATTED datatype_stats bl PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --boolean -DESC FORMATTED datatype_stats bl +POSTHOOK: query: DESC FORMATTED datatype_stats bl POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment @@ -662,12 +636,10 @@ POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment bl boolean 1 9 8 from deserializer -PREHOOK: query: --binary -DESC FORMATTED datatype_stats bin +PREHOOK: query: DESC FORMATTED datatype_stats bin PREHOOK: type: DESCTABLE PREHOOK: Input: default@datatype_stats -POSTHOOK: query: --binary -DESC FORMATTED datatype_stats bin +POSTHOOK: query: DESC FORMATTED datatype_stats bin POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@datatype_stats # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment diff --git a/ql/src/test/results/clientpositive/alter_varchar1.q.out b/ql/src/test/results/clientpositive/alter_varchar1.q.out index d1928ef..194a48d 100644 --- a/ql/src/test/results/clientpositive/alter_varchar1.q.out +++ b/ql/src/test/results/clientpositive/alter_varchar1.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create database avc +PREHOOK: query: create database avc PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:avc -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create database avc +POSTHOOK: query: create database avc POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:avc PREHOOK: query: create table avc.alter_varchar_1 (key string, value string) @@ -41,23 +37,19 @@ POSTHOOK: Input: avc@alter_varchar_1 0 val_0 10 val_10 100 val_100 -PREHOOK: query: -- change column to varchar -alter table avc.alter_varchar_1 change 
column value value varchar(20) +PREHOOK: query: alter table avc.alter_varchar_1 change column value value varchar(20) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: avc@alter_varchar_1 PREHOOK: Output: avc@alter_varchar_1 -POSTHOOK: query: -- change column to varchar -alter table avc.alter_varchar_1 change column value value varchar(20) +POSTHOOK: query: alter table avc.alter_varchar_1 change column value value varchar(20) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: avc@alter_varchar_1 POSTHOOK: Output: avc@alter_varchar_1 -PREHOOK: query: -- contents should still look the same -select * from avc.alter_varchar_1 +PREHOOK: query: select * from avc.alter_varchar_1 PREHOOK: type: QUERY PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- contents should still look the same -select * from avc.alter_varchar_1 +POSTHOOK: query: select * from avc.alter_varchar_1 POSTHOOK: type: QUERY POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### @@ -66,23 +58,19 @@ POSTHOOK: Input: avc@alter_varchar_1 0 val_0 10 val_10 100 val_100 -PREHOOK: query: -- change column to smaller varchar -alter table avc.alter_varchar_1 change column value value varchar(3) +PREHOOK: query: alter table avc.alter_varchar_1 change column value value varchar(3) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: avc@alter_varchar_1 PREHOOK: Output: avc@alter_varchar_1 -POSTHOOK: query: -- change column to smaller varchar -alter table avc.alter_varchar_1 change column value value varchar(3) +POSTHOOK: query: alter table avc.alter_varchar_1 change column value value varchar(3) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: avc@alter_varchar_1 POSTHOOK: Output: avc@alter_varchar_1 -PREHOOK: query: -- value column should be truncated now -select * from avc.alter_varchar_1 +PREHOOK: query: select * from avc.alter_varchar_1 PREHOOK: type: QUERY PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: 
-- value column should be truncated now -select * from avc.alter_varchar_1 +POSTHOOK: query: select * from avc.alter_varchar_1 POSTHOOK: type: QUERY POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### @@ -91,23 +79,19 @@ POSTHOOK: Input: avc@alter_varchar_1 0 val 10 val 100 val -PREHOOK: query: -- change back to bigger varchar -alter table avc.alter_varchar_1 change column value value varchar(20) +PREHOOK: query: alter table avc.alter_varchar_1 change column value value varchar(20) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: avc@alter_varchar_1 PREHOOK: Output: avc@alter_varchar_1 -POSTHOOK: query: -- change back to bigger varchar -alter table avc.alter_varchar_1 change column value value varchar(20) +POSTHOOK: query: alter table avc.alter_varchar_1 change column value value varchar(20) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: avc@alter_varchar_1 POSTHOOK: Output: avc@alter_varchar_1 -PREHOOK: query: -- column values should be full size again -select * from avc.alter_varchar_1 +PREHOOK: query: select * from avc.alter_varchar_1 PREHOOK: type: QUERY PREHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- column values should be full size again -select * from avc.alter_varchar_1 +POSTHOOK: query: select * from avc.alter_varchar_1 POSTHOOK: type: QUERY POSTHOOK: Input: avc@alter_varchar_1 #### A masked pattern was here #### @@ -116,13 +100,11 @@ POSTHOOK: Input: avc@alter_varchar_1 0 val_0 10 val_10 100 val_100 -PREHOOK: query: -- add varchar column -alter table avc.alter_varchar_1 add columns (key2 int, value2 varchar(10)) +PREHOOK: query: alter table avc.alter_varchar_1 add columns (key2 int, value2 varchar(10)) PREHOOK: type: ALTERTABLE_ADDCOLS PREHOOK: Input: avc@alter_varchar_1 PREHOOK: Output: avc@alter_varchar_1 -POSTHOOK: query: -- add varchar column -alter table avc.alter_varchar_1 add columns (key2 int, value2 varchar(10)) +POSTHOOK: query: alter table avc.alter_varchar_1 add 
columns (key2 int, value2 varchar(10)) POSTHOOK: type: ALTERTABLE_ADDCOLS POSTHOOK: Input: avc@alter_varchar_1 POSTHOOK: Output: avc@alter_varchar_1 diff --git a/ql/src/test/results/clientpositive/alter_varchar2.q.out b/ql/src/test/results/clientpositive/alter_varchar2.q.out index c589bf2..5f8bbb3 100644 --- a/ql/src/test/results/clientpositive/alter_varchar2.q.out +++ b/ql/src/test/results/clientpositive/alter_varchar2.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- alter column type, with partitioned table -drop table if exists alter_varchar2 +PREHOOK: query: drop table if exists alter_varchar2 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- alter column type, with partitioned table -drop table if exists alter_varchar2 +POSTHOOK: query: drop table if exists alter_varchar2 POSTHOOK: type: DROPTABLE PREHOOK: query: create table alter_varchar2 ( c1 varchar(255) diff --git a/ql/src/test/results/clientpositive/ambiguitycheck.q.out b/ql/src/test/results/clientpositive/ambiguitycheck.q.out index 3c0f3aa..3574573 100644 --- a/ql/src/test/results/clientpositive/ambiguitycheck.q.out +++ b/ql/src/test/results/clientpositive/ambiguitycheck.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- check cluster/distribute/partitionBy -SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) +PREHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- check cluster/distribute/partitionBy -SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) +POSTHOOK: query: SELECT * FROM SRC x where x.key = 20 CLUSTER BY (key,value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -45,8 +43,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 20 val_20 -PREHOOK: query: -- HIVE-6950 -SELECT tab1.key, +PREHOOK: query: SELECT tab1.key, tab1.value, SUM(1) FROM src as tab1 @@ -56,8 +53,7 @@ GROUPING SETS 
((tab1.key, tab1.value)) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- HIVE-6950 -SELECT tab1.key, +POSTHOOK: query: SELECT tab1.key, tab1.value, SUM(1) FROM src as tab1 diff --git a/ql/src/test/results/clientpositive/ambiguous_col.q.out b/ql/src/test/results/clientpositive/ambiguous_col.q.out index a10b9c5..a7682fb 100644 --- a/ql/src/test/results/clientpositive/ambiguous_col.q.out +++ b/ql/src/test/results/clientpositive/ambiguous_col.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- TOK_ALLCOLREF -explain select * from (select a.key, a.* from (select * from src) a join (select * from src1) b on (a.key = b.key)) t +PREHOOK: query: explain select * from (select a.key, a.* from (select * from src) a join (select * from src1) b on (a.key = b.key)) t PREHOOK: type: QUERY -POSTHOOK: query: -- TOK_ALLCOLREF -explain select * from (select a.key, a.* from (select * from src) a join (select * from src1) b on (a.key = b.key)) t +POSTHOOK: query: explain select * from (select a.key, a.* from (select * from src) a join (select * from src1) b on (a.key = b.key)) t POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -117,11 +115,9 @@ POSTHOOK: Input: default@src1 66 66 val_66 98 98 val_98 98 98 val_98 -PREHOOK: query: -- DOT -explain select * from (select a.key, a.`[k].*` from (select * from src) a join (select * from src1) b on (a.key = b.key)) t +PREHOOK: query: explain select * from (select a.key, a.`[k].*` from (select * from src) a join (select * from src1) b on (a.key = b.key)) t PREHOOK: type: QUERY -POSTHOOK: query: -- DOT -explain select * from (select a.key, a.`[k].*` from (select * from src) a join (select * from src1) b on (a.key = b.key)) t +POSTHOOK: query: explain select * from (select a.key, a.`[k].*` from (select * from src) a join (select * from src1) b on (a.key = b.key)) t POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -235,11 +231,9 @@ POSTHOOK: Input: default@src1 
66 66 98 98 98 98 -PREHOOK: query: -- EXPRESSION -explain select * from (select a.key, a.key from (select * from src) a join (select * from src1) b on (a.key = b.key)) t +PREHOOK: query: explain select * from (select a.key, a.key from (select * from src) a join (select * from src1) b on (a.key = b.key)) t PREHOOK: type: QUERY -POSTHOOK: query: -- EXPRESSION -explain select * from (select a.key, a.key from (select * from src) a join (select * from src1) b on (a.key = b.key)) t +POSTHOOK: query: explain select * from (select a.key, a.key from (select * from src) a join (select * from src1) b on (a.key = b.key)) t POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out b/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out index 1cca288..c6691d6 100644 --- a/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out +++ b/ql/src/test/results/clientpositive/analyze_table_null_partition.q.out @@ -69,12 +69,10 @@ POSTHOOK: Output: default@test2@age=15 POSTHOOK: Output: default@test2@age=30 POSTHOOK: Output: default@test2@age=40 POSTHOOK: Output: default@test2@age=__HIVE_DEFAULT_PARTITION__ -PREHOOK: query: -- To show stats. It doesn't show due to a bug. -DESC EXTENDED test2 +PREHOOK: query: DESC EXTENDED test2 PREHOOK: type: DESCTABLE PREHOOK: Input: default@test2 -POSTHOOK: query: -- To show stats. It doesn't show due to a bug. -DESC EXTENDED test2 +POSTHOOK: query: DESC EXTENDED test2 POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@test2 name string @@ -86,11 +84,9 @@ age int age int #### A masked pattern was here #### -PREHOOK: query: -- Another way to show stats. -EXPLAIN EXTENDED select * from test2 +PREHOOK: query: EXPLAIN EXTENDED select * from test2 PREHOOK: type: QUERY -POSTHOOK: query: -- Another way to show stats. 
-EXPLAIN EXTENDED select * from test2 +POSTHOOK: query: EXPLAIN EXTENDED select * from test2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out index bd0b3bb..e22c3ef 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_filter.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_filter.q.out @@ -52,11 +52,9 @@ POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(na POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ] POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ] POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ] -PREHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +POSTHOOK: query: explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -75,13 +73,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- column stats are not COMPLETE, so stats are not updated --- numRows: 8 rawDataSize: 796 -explain select * from loc_orc where state='OH' +PREHOOK: query: explain select * from loc_orc where state='OH' PREHOOK: type: QUERY -POSTHOOK: query: -- column stats are not COMPLETE, so stats are not updated --- numRows: 8 rawDataSize: 796 -explain select * from loc_orc where state='OH' +POSTHOOK: query: explain select * from loc_orc where state='OH' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -123,13 +117,9 @@ POSTHOOK: query: analyze table loc_orc compute statistics for 
columns state,loci POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- state column has 5 distincts. numRows/countDistincts --- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where state='OH' +PREHOOK: query: explain select * from loc_orc where state='OH' PREHOOK: type: QUERY -POSTHOOK: query: -- state column has 5 distincts. numRows/countDistincts --- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where state='OH' +POSTHOOK: query: explain select * from loc_orc where state='OH' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -163,13 +153,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- not equals comparison shouldn't affect number of rows --- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where state!='OH' +PREHOOK: query: explain select * from loc_orc where state!='OH' PREHOOK: type: QUERY -POSTHOOK: query: -- not equals comparison shouldn't affect number of rows --- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where state!='OH' +POSTHOOK: query: explain select * from loc_orc where state!='OH' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -239,13 +225,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- nulls are treated as constant equality comparison --- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where zip is null +PREHOOK: query: explain select * from loc_orc where zip is null PREHOOK: type: QUERY -POSTHOOK: query: -- nulls are treated as constant equality comparison --- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where zip is null +POSTHOOK: query: explain select * from loc_orc where zip is null POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -279,11 +261,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where !(zip is not null) +PREHOOK: query: explain select * 
from loc_orc where !(zip is not null) PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where !(zip is not null) +POSTHOOK: query: explain select * from loc_orc where !(zip is not null) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -317,13 +297,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- not nulls are treated as inverse of nulls --- numRows: 7 rawDataSize: 702 -explain select * from loc_orc where zip is not null +PREHOOK: query: explain select * from loc_orc where zip is not null PREHOOK: type: QUERY -POSTHOOK: query: -- not nulls are treated as inverse of nulls --- numRows: 7 rawDataSize: 702 -explain select * from loc_orc where zip is not null +POSTHOOK: query: explain select * from loc_orc where zip is not null POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -357,11 +333,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 7 rawDataSize: 702 -explain select * from loc_orc where !(zip is null) +PREHOOK: query: explain select * from loc_orc where !(zip is null) PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 7 rawDataSize: 702 -explain select * from loc_orc where !(zip is null) +POSTHOOK: query: explain select * from loc_orc where !(zip is null) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -395,13 +369,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- NOT evaluation. true will pass all rows, false will not pass any rows --- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where !false +PREHOOK: query: explain select * from loc_orc where !false PREHOOK: type: QUERY -POSTHOOK: query: -- NOT evaluation. 
true will pass all rows, false will not pass any rows --- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where !false +POSTHOOK: query: explain select * from loc_orc where !false POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -420,11 +390,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc where !true +PREHOOK: query: explain select * from loc_orc where !true PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc where !true +POSTHOOK: query: explain select * from loc_orc where !true POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -458,13 +426,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Constant evaluation. true will pass all rows, false will not pass any rows --- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where true +PREHOOK: query: explain select * from loc_orc where true PREHOOK: type: QUERY -POSTHOOK: query: -- Constant evaluation. 
true will pass all rows, false will not pass any rows --- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where true +POSTHOOK: query: explain select * from loc_orc where true POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -483,11 +447,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where 'foo' +PREHOOK: query: explain select * from loc_orc where 'foo' PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where 'foo' +POSTHOOK: query: explain select * from loc_orc where 'foo' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -521,11 +483,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where true = true +PREHOOK: query: explain select * from loc_orc where true = true PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where true = true +POSTHOOK: query: explain select * from loc_orc where true = true POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -544,11 +504,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc where false = true +PREHOOK: query: explain select * from loc_orc where false = true PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc where false = true +POSTHOOK: query: explain select * from loc_orc where false = true POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -582,11 +540,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc where 'foo' = 'bar' +PREHOOK: query: explain select * from loc_orc where 'foo' = 
'bar' PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc where 'foo' = 'bar' +POSTHOOK: query: explain select * from loc_orc where 'foo' = 'bar' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -620,11 +576,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc where false +PREHOOK: query: explain select * from loc_orc where false PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc where false +POSTHOOK: query: explain select * from loc_orc where false POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -658,13 +612,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- OR evaluation. 1 row for OH and 1 row for CA --- numRows: 2 rawDataSize: 204 -explain select * from loc_orc where state='OH' or state='CA' +PREHOOK: query: explain select * from loc_orc where state='OH' or state='CA' PREHOOK: type: QUERY -POSTHOOK: query: -- OR evaluation. 1 row for OH and 1 row for CA --- numRows: 2 rawDataSize: 204 -explain select * from loc_orc where state='OH' or state='CA' +POSTHOOK: query: explain select * from loc_orc where state='OH' or state='CA' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -698,13 +648,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- AND evaluation. cascadingly apply rules. 8/2 = 4/2 = 2 --- numRows: 2 rawDataSize: 204 -explain select * from loc_orc where year=2001 and year is null +PREHOOK: query: explain select * from loc_orc where year=2001 and year is null PREHOOK: type: QUERY -POSTHOOK: query: -- AND evaluation. cascadingly apply rules. 
8/2 = 4/2 = 2 --- numRows: 2 rawDataSize: 204 -explain select * from loc_orc where year=2001 and year is null +POSTHOOK: query: explain select * from loc_orc where year=2001 and year is null POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -738,11 +684,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where year=2001 and state='OH' and state='FL' +PREHOOK: query: explain select * from loc_orc where year=2001 and state='OH' and state='FL' PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where year=2001 and state='OH' and state='FL' +POSTHOOK: query: explain select * from loc_orc where year=2001 and state='OH' and state='FL' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -776,13 +720,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- AND and OR together. left expr will yield 1 row and right will yield 1 row --- numRows: 3 rawDataSize: 306 -explain select * from loc_orc where (year=2001 and year is null) or (state='CA') +PREHOOK: query: explain select * from loc_orc where (year=2001 and year is null) or (state='CA') PREHOOK: type: QUERY -POSTHOOK: query: -- AND and OR together. left expr will yield 1 row and right will yield 1 row --- numRows: 3 rawDataSize: 306 -explain select * from loc_orc where (year=2001 and year is null) or (state='CA') +POSTHOOK: query: explain select * from loc_orc where (year=2001 and year is null) or (state='CA') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -816,13 +756,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- AND and OR together. 
left expr will yield 8 rows and right will yield 1 row --- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where (year=2001 or year is null) and (state='CA') +PREHOOK: query: explain select * from loc_orc where (year=2001 or year is null) and (state='CA') PREHOOK: type: QUERY -POSTHOOK: query: -- AND and OR together. left expr will yield 8 rows and right will yield 1 row --- numRows: 1 rawDataSize: 102 -explain select * from loc_orc where (year=2001 or year is null) and (state='CA') +POSTHOOK: query: explain select * from loc_orc where (year=2001 or year is null) and (state='CA') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -856,15 +792,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- inequality conditions falling out of range. total or zero (converted to one) --- numRows: 1 rawDataSize: 102 --- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where locid < 30 +PREHOOK: query: explain select * from loc_orc where locid < 30 PREHOOK: type: QUERY -POSTHOOK: query: -- inequality conditions falling out of range. total or zero (converted to one) --- numRows: 1 rawDataSize: 102 --- numRows: 8 rawDataSize: 804 -explain select * from loc_orc where locid < 30 +POSTHOOK: query: explain select * from loc_orc where locid < 30 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1006,13 +936,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- all inequality conditions falling within range. rows/3 is the rules --- numRows: 2 rawDataSize: 204 -explain select * from loc_orc where locid < 3 +PREHOOK: query: explain select * from loc_orc where locid < 3 PREHOOK: type: QUERY -POSTHOOK: query: -- all inequality conditions falling within range. 
rows/3 is the rules --- numRows: 2 rawDataSize: 204 -explain select * from loc_orc where locid < 3 +POSTHOOK: query: explain select * from loc_orc where locid < 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out index d134d27..a8e4854 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_groupby.q.out @@ -1,24 +1,4 @@ -PREHOOK: query: -- hash aggregation is disabled - --- There are different cases for Group By depending on map/reduce side, hash aggregation, --- grouping sets and column stats. If we don't have column stats, we just assume hash --- aggregation is disabled. Following are the possible cases and rule for cardinality --- estimation - --- MAP SIDE: --- Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows --- Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet --- Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism) --- Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) --- Case 5: column stats, NO hash aggregation, NO grouping sets — numRows --- Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet - --- REDUCE SIDE: --- Case 7: NO column stats — numRows / 2 --- Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet) --- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) - -create table if not exists loc_staging ( +PREHOOK: query: create table if not exists loc_staging ( state string, locid int, zip bigint, @@ -27,27 +7,7 @@ create table if not exists loc_staging ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: 
default@loc_staging -POSTHOOK: query: -- hash aggregation is disabled - --- There are different cases for Group By depending on map/reduce side, hash aggregation, --- grouping sets and column stats. If we don't have column stats, we just assume hash --- aggregation is disabled. Following are the possible cases and rule for cardinality --- estimation - --- MAP SIDE: --- Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows --- Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet --- Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism) --- Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) --- Case 5: column stats, NO hash aggregation, NO grouping sets — numRows --- Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet - --- REDUCE SIDE: --- Case 7: NO column stats — numRows / 2 --- Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet) --- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) - -create table if not exists loc_staging ( +POSTHOOK: query: create table if not exists loc_staging ( state string, locid int, zip bigint, @@ -92,11 +52,9 @@ POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(na POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ] POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ] POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ] -PREHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc 
+POSTHOOK: query: explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -115,28 +73,22 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- partial column stats -analyze table loc_orc compute statistics for columns state +PREHOOK: query: analyze table loc_orc compute statistics for columns state PREHOOK: type: QUERY PREHOOK: Input: default@loc_orc #### A masked pattern was here #### -POSTHOOK: query: -- partial column stats -analyze table loc_orc compute statistics for columns state +POSTHOOK: query: analyze table loc_orc compute statistics for columns state POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- inner group by: map - numRows: 8 reduce - numRows: 4 --- outer group by: map - numRows: 4 reduce numRows: 2 -explain select a, c, min(b) +PREHOOK: query: explain select a, c, min(b) from ( select state as a, locid as b, count(*) as c from loc_orc group by state,locid ) sq1 group by a,c PREHOOK: type: QUERY -POSTHOOK: query: -- inner group by: map - numRows: 8 reduce - numRows: 4 --- outer group by: map - numRows: 4 reduce numRows: 2 -explain select a, c, min(b) +POSTHOOK: query: explain select a, c, min(b) from ( select state as a, locid as b, count(*) as c from loc_orc group by state,locid @@ -230,13 +182,9 @@ POSTHOOK: query: analyze table loc_orc compute statistics for columns state,loci POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column 
stats, NO grouping sets - caridnality = 2 -explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -283,13 +231,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 8 -explain select state,locid from loc_orc group by state,locid +PREHOOK: query: explain select state,locid from loc_orc group by state,locid PREHOOK: type: QUERY -POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 8 -explain select state,locid from loc_orc group by state,locid +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -336,13 +280,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -390,13 +330,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 --- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc 
group by state,locid with rollup +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 --- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc group by state,locid with rollup +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -494,13 +430,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8 --- Case 8: column stats, grouping sets - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state)) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8 --- Case 8: column stats, grouping sets - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state)) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -548,13 +480,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16 --- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16 --- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by 
state,locid grouping sets((state),(locid)) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -602,13 +530,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 --- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 --- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -656,13 +580,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: 
Stage-1 is a root stage @@ -710,17 +630,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side parallelism will be 10 - --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- map-side parallelism will be 10 - --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -767,13 +679,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16 --- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16 --- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -821,15 +729,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- ndvProduct becomes 0 as zip does not have column stats --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select state,zip from loc_orc group by state,zip +PREHOOK: query: explain select state,zip from loc_orc group by state,zip PREHOOK: type: QUERY -POSTHOOK: 
query: -- ndvProduct becomes 0 as zip does not have column stats --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select state,zip from loc_orc group by state,zip +POSTHOOK: query: explain select state,zip from loc_orc group by state,zip POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -876,13 +778,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -930,13 +828,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 --- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid with rollup +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 --- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid with rollup +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1034,13 +928,9 @@ STAGE PLANS: Processor Tree: 
ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 7: NO column stats - cardinality = 4 -explain select state,locid from loc_orc group by state,locid grouping sets((state)) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 7: NO column stats - cardinality = 4 -explain select state,locid from loc_orc group by state,locid grouping sets((state)) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1088,13 +978,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16 --- Case 7: NO column stats - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16 --- Case 7: NO column stats - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1142,13 +1028,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 --- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid 
grouping sets((state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 --- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1196,13 +1078,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1250,13 +1128,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 7: NO column stats - cardinality = 4 -explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 7: NO column stats - cardinality = 4 -explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc group by 
year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1303,13 +1177,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out index 5e5efa8..31c4ed1 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_groupby2.q.out @@ -2,47 +2,11 @@ PREHOOK: query: drop table location PREHOOK: type: DROPTABLE POSTHOOK: query: drop table location POSTHOOK: type: DROPTABLE -PREHOOK: query: -- There are different cases for Group By depending on map/reduce side, hash aggregation, --- grouping sets and column stats. If we don't have column stats, we just assume hash --- aggregation is disabled. 
Following are the possible cases and rule for cardinality --- estimation - --- MAP SIDE: --- Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows --- Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet --- Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism) --- Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) --- Case 5: column stats, NO hash aggregation, NO grouping sets — numRows --- Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet - --- REDUCE SIDE: --- Case 7: NO column stats — numRows / 2 --- Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet) --- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) - -create table location (state string, country string, votes bigint) +PREHOOK: query: create table location (state string, country string, votes bigint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@location -POSTHOOK: query: -- There are different cases for Group By depending on map/reduce side, hash aggregation, --- grouping sets and column stats. If we don't have column stats, we just assume hash --- aggregation is disabled. 
Following are the possible cases and rule for cardinality --- estimation - --- MAP SIDE: --- Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows --- Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet --- Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism) --- Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) --- Case 5: column stats, NO hash aggregation, NO grouping sets — numRows --- Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet - --- REDUCE SIDE: --- Case 7: NO column stats — numRows / 2 --- Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet) --- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) - -create table location (state string, country string, votes bigint) +POSTHOOK: query: create table location (state string, country string, votes bigint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@location @@ -70,13 +34,9 @@ POSTHOOK: query: analyze table location compute statistics for columns state, co POSTHOOK: type: QUERY POSTHOOK: Input: default@location #### A masked pattern was here #### -PREHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 20 --- Case 7: NO column stats - cardinality = 10 -explain select state, country from location group by state, country +PREHOOK: query: explain select state, country from location group by state, country PREHOOK: type: QUERY -POSTHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 20 --- Case 7: NO column stats - cardinality = 10 -explain select state, country from location group by state, country +POSTHOOK: query: explain select state, country from location group by state, country POSTHOOK: type: QUERY STAGE 
DEPENDENCIES: Stage-1 is a root stage @@ -123,13 +83,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 80 --- Case 7: NO column stats - cardinality = 40 -explain select state, country from location group by state, country with cube +PREHOOK: query: explain select state, country from location group by state, country with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 80 --- Case 7: NO column stats - cardinality = 40 -explain select state, country from location group by state, country with cube +POSTHOOK: query: explain select state, country from location group by state, country with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -177,17 +133,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- parallelism = 4 - --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select state, country from location group by state, country +PREHOOK: query: explain select state, country from location group by state, country PREHOOK: type: QUERY -POSTHOOK: query: -- parallelism = 4 - --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select state, country from location group by state, country +POSTHOOK: query: explain select state, country from location group by state, country POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -234,15 +182,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- column stats for votes is missing, so ndvProduct becomes 0 and will be set to numRows / 2 --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 10 --- Case 9: column stats, NO grouping sets - caridnality = 5 -explain select state, votes from location 
group by state, votes +PREHOOK: query: explain select state, votes from location group by state, votes PREHOOK: type: QUERY -POSTHOOK: query: -- column stats for votes is missing, so ndvProduct becomes 0 and will be set to numRows / 2 --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 10 --- Case 9: column stats, NO grouping sets - caridnality = 5 -explain select state, votes from location group by state, votes +POSTHOOK: query: explain select state, votes from location group by state, votes POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -289,13 +231,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 8 -explain select state, country from location group by state, country with cube +PREHOOK: query: explain select state, country from location group by state, country with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 8 -explain select state, country from location group by state, country with cube +POSTHOOK: query: explain select state, country from location group by state, country with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -343,13 +281,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 20 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select state, country from location group by state, country +PREHOOK: query: explain select state, country from location group by state, country PREHOOK: type: QUERY -POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 20 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select state, country from location group by 
state, country +POSTHOOK: query: explain select state, country from location group by state, country POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -396,13 +330,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 80 --- Case 8: column stats, grouping sets - cardinality = 8 -explain select state, country from location group by state, country with cube +PREHOOK: query: explain select state, country from location group by state, country with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 80 --- Case 8: column stats, grouping sets - cardinality = 8 -explain select state, country from location group by state, country with cube +POSTHOOK: query: explain select state, country from location group by state, country with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/annotate_stats_join.q.out index 2b78e1f..5d4fe6c 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_join.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_join.q.out @@ -118,41 +118,9 @@ POSTHOOK: query: analyze table loc compute statistics for columns state,locid,zi POSTHOOK: type: QUERY POSTHOOK: Input: default@loc #### A masked pattern was here #### -PREHOOK: query: -- number of rows --- emp - 48 --- dept - 6 --- loc - 8 - --- count distincts for relevant columns (since count distinct values are approximate in some cases count distint values will be greater than number of rows) --- emp.deptid - 3 --- emp.lastname - 6 --- emp.locid - 7 --- dept.deptid - 7 --- dept.deptname - 6 --- loc.locid - 7 --- loc.state - 6 - --- 2 relations, 1 attribute --- Expected output rows: (48*6)/max(3,7) = 41 -explain select * from emp e join dept d on (e.deptid = d.deptid) +PREHOOK: query: explain 
select * from emp e join dept d on (e.deptid = d.deptid) PREHOOK: type: QUERY -POSTHOOK: query: -- number of rows --- emp - 48 --- dept - 6 --- loc - 8 - --- count distincts for relevant columns (since count distinct values are approximate in some cases count distint values will be greater than number of rows) --- emp.deptid - 3 --- emp.lastname - 6 --- emp.locid - 7 --- dept.deptid - 7 --- dept.deptname - 6 --- loc.locid - 7 --- loc.state - 6 - --- 2 relations, 1 attribute --- Expected output rows: (48*6)/max(3,7) = 41 -explain select * from emp e join dept d on (e.deptid = d.deptid) +POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -217,13 +185,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 2 relations, 2 attributes --- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname +PREHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname PREHOOK: type: QUERY -POSTHOOK: query: -- 2 relations, 2 attributes --- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname +POSTHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -353,13 +317,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 2 relations, 3 attributes --- Expected output rows: (48*6)/(max(3,7) * max(6,6) * max(6,6)) = 1 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: 
query: -- 2 relations, 3 attributes --- Expected output rows: (48*6)/(max(3,7) * max(6,6) * max(6,6)) = 1 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -423,13 +383,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 3 relations, 1 attribute --- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658 -explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid) +PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid) PREHOOK: type: QUERY -POSTHOOK: query: -- 3 relations, 1 attribute --- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658 -explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid) +POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -512,11 +468,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Expected output rows: (48*6*8)/top2largest(3,7,7) = 47 -explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid) +PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid) PREHOOK: type: QUERY -POSTHOOK: query: -- Expected output rows: (48*6*8)/top2largest(3,7,7) = 47 -explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid) +POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -599,13 
+553,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 3 relations and 2 attribute --- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1 -explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state) +PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state) PREHOOK: type: QUERY -POSTHOOK: query: -- 3 relations and 2 attribute --- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1 -explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state) +POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -687,11 +637,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- left outer join -explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- left outer join -explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -749,11 +697,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- left semi join -explain select * from emp left semi join dept on 
emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- left semi join -explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -822,11 +768,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- right outer join -explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- right outer join -explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -884,11 +828,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- full outer join -explain select * from emp full outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp full outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- full outer join -explain select * from emp full outer join dept on 
emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp full outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out index c581aff..f4b9ca4 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_join_pkfk.q.out @@ -10,10 +10,7 @@ PREHOOK: query: drop table customer_address PREHOOK: type: DROPTABLE POSTHOOK: query: drop table customer_address POSTHOOK: type: DROPTABLE -PREHOOK: query: -- s_store_sk is PK, ss_store_sk is FK --- ca_address_sk is PK, ss_addr_sk is FK - -create table store_sales +PREHOOK: query: create table store_sales ( ss_sold_date_sk int, ss_sold_time_sk int, @@ -43,10 +40,7 @@ row format delimited fields terminated by '|' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@store_sales -POSTHOOK: query: -- s_store_sk is PK, ss_store_sk is FK --- ca_address_sk is PK, ss_addr_sk is FK - -create table store_sales +POSTHOOK: query: create table store_sales ( ss_sold_date_sk int, ss_sold_time_sk int, diff --git a/ql/src/test/results/clientpositive/annotate_stats_limit.q.out b/ql/src/test/results/clientpositive/annotate_stats_limit.q.out index 04c097b..ea181cb 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_limit.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_limit.q.out @@ -60,11 +60,9 @@ POSTHOOK: query: analyze table loc_orc compute statistics for columns state, loc POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +PREHOOK: query: explain select * from 
loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +POSTHOOK: query: explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -83,11 +81,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 4 rawDataSize: 396 -explain select * from loc_orc limit 4 +PREHOOK: query: explain select * from loc_orc limit 4 PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 4 rawDataSize: 396 -explain select * from loc_orc limit 4 +POSTHOOK: query: explain select * from loc_orc limit 4 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -109,13 +105,9 @@ STAGE PLANS: Statistics: Num rows: 4 Data size: 408 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- greater than the available number of rows --- numRows: 8 rawDataSize: 796 -explain select * from loc_orc limit 16 +PREHOOK: query: explain select * from loc_orc limit 16 PREHOOK: type: QUERY -POSTHOOK: query: -- greater than the available number of rows --- numRows: 8 rawDataSize: 796 -explain select * from loc_orc limit 16 +POSTHOOK: query: explain select * from loc_orc limit 16 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -137,11 +129,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc limit 0 +PREHOOK: query: explain select * from loc_orc limit 0 PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select * from loc_orc limit 0 +POSTHOOK: query: explain select * from loc_orc limit 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out index 77fbd3a..866d30a 100644 --- 
a/ql/src/test/results/clientpositive/annotate_stats_part.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out @@ -40,11 +40,9 @@ POSTHOOK: query: create table if not exists loc_orc ( POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@loc_orc -PREHOOK: query: -- basicStatState: NONE colStatState: NONE -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: NONE colStatState: NONE -explain select * from loc_orc +POSTHOOK: query: explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -78,15 +76,9 @@ POSTHOOK: Lineage: loc_orc PARTITION(year=2001).zip SIMPLE [(loc_staging)loc_sta POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).locid SIMPLE [(loc_staging)loc_staging.FieldSchema(name:locid, type:int, comment:null), ] POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ] POSTHOOK: Lineage: loc_orc PARTITION(year=__HIVE_DEFAULT_PARTITION__).zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ] -PREHOOK: query: -- stats are disabled. basic stats will report the file size but not raw data size. so initial statistics will be PARTIAL - --- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- stats are disabled. basic stats will report the file size but not raw data size. 
so initial statistics will be PARTIAL - --- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc +POSTHOOK: query: explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -105,23 +97,19 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 1104 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- partition level analyze statistics for specific parition -analyze table loc_orc partition(year='2001') compute statistics +PREHOOK: query: analyze table loc_orc partition(year='2001') compute statistics PREHOOK: type: QUERY PREHOOK: Input: default@loc_orc PREHOOK: Output: default@loc_orc PREHOOK: Output: default@loc_orc@year=2001 -POSTHOOK: query: -- partition level analyze statistics for specific parition -analyze table loc_orc partition(year='2001') compute statistics +POSTHOOK: query: analyze table loc_orc partition(year='2001') compute statistics POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc POSTHOOK: Output: default@loc_orc POSTHOOK: Output: default@loc_orc@year=2001 -PREHOOK: query: -- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__' +PREHOOK: query: explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__' PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__' +POSTHOOK: query: explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -140,11 +128,9 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 372 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: PARTIAL colStatState: NONE -explain select * from loc_orc +POSTHOOK: query: 
explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -163,11 +149,9 @@ STAGE PLANS: Statistics: Num rows: 7 Data size: 1288 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='2001' +PREHOOK: query: explain select * from loc_orc where year='2001' PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='2001' +POSTHOOK: query: explain select * from loc_orc where year='2001' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -186,25 +170,21 @@ STAGE PLANS: Statistics: Num rows: 7 Data size: 734 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- partition level analyze statistics for all partitions -analyze table loc_orc partition(year) compute statistics +PREHOOK: query: analyze table loc_orc partition(year) compute statistics PREHOOK: type: QUERY PREHOOK: Input: default@loc_orc PREHOOK: Output: default@loc_orc PREHOOK: Output: default@loc_orc@year=2001 PREHOOK: Output: default@loc_orc@year=__HIVE_DEFAULT_PARTITION__ -POSTHOOK: query: -- partition level analyze statistics for all partitions -analyze table loc_orc partition(year) compute statistics +POSTHOOK: query: analyze table loc_orc partition(year) compute statistics POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc POSTHOOK: Output: default@loc_orc POSTHOOK: Output: default@loc_orc@year=2001 POSTHOOK: Output: default@loc_orc@year=__HIVE_DEFAULT_PARTITION__ -PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__' +PREHOOK: query: explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__' PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__' +POSTHOOK: query: 
explain select * from loc_orc where year='__HIVE_DEFAULT_PARTITION__' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -223,11 +203,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc +POSTHOOK: query: explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -246,11 +224,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 1472 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__' +PREHOOK: query: explain select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__' PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__' +POSTHOOK: query: explain select * from loc_orc where year='2001' or year='__HIVE_DEFAULT_PARTITION__' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -269,13 +245,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 1472 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- both partitions will be pruned --- basicStatState: NONE colStatState: NONE -explain select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__' +PREHOOK: query: explain select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__' PREHOOK: type: QUERY -POSTHOOK: query: -- both partitions will be pruned --- basicStatState: NONE colStatState: NONE -explain select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__' +POSTHOOK: query: explain 
select * from loc_orc where year='2001' and year='__HIVE_DEFAULT_PARTITION__' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -297,23 +269,19 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- partition level partial column statistics -analyze table loc_orc partition(year='2001') compute statistics for columns state,locid +PREHOOK: query: analyze table loc_orc partition(year='2001') compute statistics for columns state,locid PREHOOK: type: QUERY PREHOOK: Input: default@loc_orc PREHOOK: Input: default@loc_orc@year=2001 #### A masked pattern was here #### -POSTHOOK: query: -- partition level partial column statistics -analyze table loc_orc partition(year='2001') compute statistics for columns state,locid +POSTHOOK: query: analyze table loc_orc partition(year='2001') compute statistics for columns state,locid POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc POSTHOOK: Input: default@loc_orc@year=2001 #### A masked pattern was here #### -PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select zip from loc_orc +PREHOOK: query: explain select zip from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select zip from loc_orc +POSTHOOK: query: explain select zip from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -332,11 +300,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 838 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain select state from loc_orc +PREHOOK: query: explain select state from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain select state from loc_orc +POSTHOOK: query: explain select state from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -355,11 +321,9 @@ STAGE PLANS: Statistics: Num 
rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select year from loc_orc +PREHOOK: query: explain select year from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select year from loc_orc +POSTHOOK: query: explain select year from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -378,13 +342,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 1472 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL --- basicStatState: COMPLETE colStatState: PARTIAL -explain select state,locid from loc_orc +PREHOOK: query: explain select state,locid from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL --- basicStatState: COMPLETE colStatState: PARTIAL -explain select state,locid from loc_orc +POSTHOOK: query: explain select state,locid from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -403,11 +363,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select state,locid from loc_orc where year='2001' +PREHOOK: query: explain select state,locid from loc_orc where year='2001' PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select state,locid from loc_orc where year='2001' +POSTHOOK: query: explain select state,locid from loc_orc where year='2001' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -426,11 +384,9 @@ STAGE PLANS: Statistics: Num rows: 7 Data size: 630 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: 
-- basicStatState: COMPLETE colStatState: NONE -explain select state,locid from loc_orc where year!='2001' +PREHOOK: query: explain select state,locid from loc_orc where year!='2001' PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select state,locid from loc_orc where year!='2001' +POSTHOOK: query: explain select state,locid from loc_orc where year!='2001' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -449,11 +405,9 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 104 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain select * from loc_orc +POSTHOOK: query: explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -472,13 +426,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 2192 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- This is to test filter expression evaluation on partition column --- numRows: 2 dataSize: 8 basicStatState: COMPLETE colStatState: COMPLETE -explain select locid from loc_orc where locid>0 and year='2001' +PREHOOK: query: explain select locid from loc_orc where locid>0 and year='2001' PREHOOK: type: QUERY -POSTHOOK: query: -- This is to test filter expression evaluation on partition column --- numRows: 2 dataSize: 8 basicStatState: COMPLETE colStatState: COMPLETE -explain select locid from loc_orc where locid>0 and year='2001' +POSTHOOK: query: explain select locid from loc_orc where locid>0 and year='2001' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_select.q.out b/ql/src/test/results/clientpositive/annotate_stats_select.q.out index 75401ae..873f1ab 100644 --- 
a/ql/src/test/results/clientpositive/annotate_stats_select.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_select.q.out @@ -89,11 +89,9 @@ POSTHOOK: Lineage: alltypes_orc.st1 SIMPLE [(alltypes)alltypes.FieldSchema(name: POSTHOOK: Lineage: alltypes_orc.ti1 SIMPLE [(alltypes)alltypes.FieldSchema(name:ti1, type:tinyint, comment:null), ] POSTHOOK: Lineage: alltypes_orc.ts1 SIMPLE [(alltypes)alltypes.FieldSchema(name:ts1, type:timestamp, comment:null), ] POSTHOOK: Lineage: alltypes_orc.vc1 SIMPLE [(alltypes)alltypes.FieldSchema(name:vc1, type:varchar(5), comment:null), ] -PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE numRows: 2 rawDataSize: 1514 -explain select * from alltypes_orc +PREHOOK: query: explain select * from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE numRows: 2 rawDataSize: 1514 -explain select * from alltypes_orc +POSTHOOK: query: explain select * from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -112,21 +110,17 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- statistics for complex types are not supported yet -analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1 +PREHOOK: query: analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1 PREHOOK: type: QUERY PREHOOK: Input: default@alltypes_orc #### A masked pattern was here #### -POSTHOOK: query: -- statistics for complex types are not supported yet -analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1 +POSTHOOK: query: analyze table alltypes_orc compute statistics for columns bo1, ti1, si1, i1, bi1, f1, d1, s1, vc1 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypes_orc #### A masked pattern was here #### -PREHOOK: query: -- numRows: 2 rawDataSize: 1514 -explain select * from 
alltypes_orc +PREHOOK: query: explain select * from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 1514 -explain select * from alltypes_orc +POSTHOOK: query: explain select * from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -145,11 +139,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 420 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 8 -explain select bo1 from alltypes_orc +PREHOOK: query: explain select bo1 from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 8 -explain select bo1 from alltypes_orc +POSTHOOK: query: explain select bo1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -168,13 +160,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- col alias renaming --- numRows: 2 rawDataSize: 8 -explain select i1 as int1 from alltypes_orc +PREHOOK: query: explain select i1 as int1 from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- col alias renaming --- numRows: 2 rawDataSize: 8 -explain select i1 as int1 from alltypes_orc +POSTHOOK: query: explain select i1 as int1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -193,11 +181,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 174 -explain select s1 from alltypes_orc +PREHOOK: query: explain select s1 from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 174 -explain select s1 from alltypes_orc +POSTHOOK: query: explain select s1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -216,13 +202,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 174 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- column 
statistics for complex types unsupported and so statistics will not be updated --- numRows: 2 rawDataSize: 1514 -explain select m1 from alltypes_orc +PREHOOK: query: explain select m1 from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- column statistics for complex types unsupported and so statistics will not be updated --- numRows: 2 rawDataSize: 1514 -explain select m1 from alltypes_orc +POSTHOOK: query: explain select m1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -241,11 +223,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 246 -explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc +PREHOOK: query: explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 246 -explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc +POSTHOOK: query: explain select bo1, ti1, si1, i1, bi1, f1, d1,s1 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -264,11 +244,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 246 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 0 -explain select null from alltypes_orc +PREHOOK: query: explain select null from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 0 -explain select null from alltypes_orc +POSTHOOK: query: explain select null from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -287,11 +265,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 8 -explain select 11 from alltypes_orc +PREHOOK: query: explain select 11 from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 8 -explain select 11 from alltypes_orc 
+POSTHOOK: query: explain select 11 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -310,11 +286,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 16 -explain select 11L from alltypes_orc +PREHOOK: query: explain select 11L from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 16 -explain select 11L from alltypes_orc +POSTHOOK: query: explain select 11L from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -333,11 +307,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 16 -explain select 11.0 from alltypes_orc +PREHOOK: query: explain select 11.0 from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 16 -explain select 11.0 from alltypes_orc +POSTHOOK: query: explain select 11.0 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -356,11 +328,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 178 -explain select "hello" from alltypes_orc +PREHOOK: query: explain select "hello" from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 178 -explain select "hello" from alltypes_orc +POSTHOOK: query: explain select "hello" from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -421,11 +391,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 178 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 96 -explain select unbase64("0xe23") from alltypes_orc +PREHOOK: query: explain select unbase64("0xe23") from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 96 -explain select 
unbase64("0xe23") from alltypes_orc +POSTHOOK: query: explain select unbase64("0xe23") from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -444,11 +412,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 16 -explain select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc +PREHOOK: query: explain select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 16 -explain select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc +POSTHOOK: query: explain select cast("1" as TINYINT), cast("20" as SMALLINT) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -467,11 +433,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 80 -explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc +PREHOOK: query: explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 80 -explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc +POSTHOOK: query: explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -490,11 +454,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 112 -explain select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc +PREHOOK: query: explain select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 112 -explain select cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc +POSTHOOK: query: explain select 
cast("1970-12-31 15:59:58.174" as DATE) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -513,11 +475,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 224 -explain select cast("58.174" as DECIMAL) from alltypes_orc +PREHOOK: query: explain select cast("58.174" as DECIMAL) from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 224 -explain select cast("58.174" as DECIMAL) from alltypes_orc +POSTHOOK: query: explain select cast("58.174" as DECIMAL) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -536,11 +496,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 112 -explain select array(1,2,3) from alltypes_orc +PREHOOK: query: explain select array(1,2,3) from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 112 -explain select array(1,2,3) from alltypes_orc +POSTHOOK: query: explain select array(1,2,3) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -571,11 +529,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 1508 -explain select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc +PREHOOK: query: explain select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 1508 -explain select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc +POSTHOOK: query: explain select str_to_map("a=1 b=2 c=3", " ", "=") from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -606,11 +562,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 112 -explain select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc +PREHOOK: query: explain 
select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 112 -explain select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc +POSTHOOK: query: explain select NAMED_STRUCT("a", 11, "b", 11) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -641,11 +595,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 250 -explain select CREATE_UNION(0, "hello") from alltypes_orc +PREHOOK: query: explain select CREATE_UNION(0, "hello") from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 250 -explain select CREATE_UNION(0, "hello") from alltypes_orc +POSTHOOK: query: explain select CREATE_UNION(0, "hello") from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -676,13 +628,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- COUNT(*) is projected as new column. It is not projected as GenericUDF and so datasize estimate will be based on number of rows --- numRows: 1 rawDataSize: 8 -explain select count(*) from alltypes_orc +PREHOOK: query: explain select count(*) from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- COUNT(*) is projected as new column. It is not projected as GenericUDF and so datasize estimate will be based on number of rows --- numRows: 1 rawDataSize: 8 -explain select count(*) from alltypes_orc +POSTHOOK: query: explain select count(*) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -694,13 +642,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- COUNT(1) is projected as new column. It is not projected as GenericUDF and so datasize estimate will be based on number of rows --- numRows: 1 rawDataSize: 8 -explain select count(1) from alltypes_orc +PREHOOK: query: explain select count(1) from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- COUNT(1) is projected as new column. 
It is not projected as GenericUDF and so datasize estimate will be based on number of rows --- numRows: 1 rawDataSize: 8 -explain select count(1) from alltypes_orc +POSTHOOK: query: explain select count(1) from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -712,13 +656,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- column statistics for complex column types will be missing. data size will be calculated from available column statistics --- numRows: 2 rawDataSize: 254 -explain select *,11 from alltypes_orc +PREHOOK: query: explain select *,11 from alltypes_orc PREHOOK: type: QUERY -POSTHOOK: query: -- column statistics for complex column types will be missing. data size will be calculated from available column statistics --- numRows: 2 rawDataSize: 254 -explain select *,11 from alltypes_orc +POSTHOOK: query: explain select *,11 from alltypes_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -737,15 +677,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 428 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- subquery selects --- inner select - numRows: 2 rawDataSize: 8 --- outer select - numRows: 2 rawDataSize: 8 -explain select i1 from (select i1 from alltypes_orc limit 10) temp +PREHOOK: query: explain select i1 from (select i1 from alltypes_orc limit 10) temp PREHOOK: type: QUERY -POSTHOOK: query: -- subquery selects --- inner select - numRows: 2 rawDataSize: 8 --- outer select - numRows: 2 rawDataSize: 8 -explain select i1 from (select i1 from alltypes_orc limit 10) temp +POSTHOOK: query: explain select i1 from (select i1 from alltypes_orc limit 10) temp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -767,13 +701,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- inner select - numRows: 2 rawDataSize: 16 --- outer select - numRows: 2 rawDataSize: 8 -explain select i1 
from (select i1,11 from alltypes_orc limit 10) temp +PREHOOK: query: explain select i1 from (select i1,11 from alltypes_orc limit 10) temp PREHOOK: type: QUERY -POSTHOOK: query: -- inner select - numRows: 2 rawDataSize: 16 --- outer select - numRows: 2 rawDataSize: 8 -explain select i1 from (select i1,11 from alltypes_orc limit 10) temp +POSTHOOK: query: explain select i1 from (select i1,11 from alltypes_orc limit 10) temp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -795,13 +725,9 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- inner select - numRows: 2 rawDataSize: 16 --- outer select - numRows: 2 rawDataSize: 186 -explain select i1,"hello" from (select i1,11 from alltypes_orc limit 10) temp +PREHOOK: query: explain select i1,"hello" from (select i1,11 from alltypes_orc limit 10) temp PREHOOK: type: QUERY -POSTHOOK: query: -- inner select - numRows: 2 rawDataSize: 16 --- outer select - numRows: 2 rawDataSize: 186 -explain select i1,"hello" from (select i1,11 from alltypes_orc limit 10) temp +POSTHOOK: query: explain select i1,"hello" from (select i1,11 from alltypes_orc limit 10) temp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -852,13 +778,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- inner select - numRows: 2 rawDataSize: 24 --- outer select - numRows: 2 rawDataSize: 16 -explain select x from (select i1,11.0 as x from alltypes_orc limit 10) temp +PREHOOK: query: explain select x from (select i1,11.0 as x from alltypes_orc limit 10) temp PREHOOK: type: QUERY -POSTHOOK: query: -- inner select - numRows: 2 rawDataSize: 24 --- outer select - numRows: 2 rawDataSize: 16 -explain select x from (select i1,11.0 as x from alltypes_orc limit 10) temp +POSTHOOK: query: explain select x from (select i1,11.0 as x from alltypes_orc limit 10) temp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -902,13 +824,9 @@ 
STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- inner select - numRows: 2 rawDataSize: 104 --- outer select - numRows: 2 rawDataSize: 186 -explain select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc limit 10) temp +PREHOOK: query: explain select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc limit 10) temp PREHOOK: type: QUERY -POSTHOOK: query: -- inner select - numRows: 2 rawDataSize: 104 --- outer select - numRows: 2 rawDataSize: 186 -explain select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc limit 10) temp +POSTHOOK: query: explain select x,"hello" from (select i1 as x, unbase64("0xe23") as ub from alltypes_orc limit 10) temp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -959,15 +877,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- inner select - numRows: 2 rawDataSize: 186 --- middle select - numRows: 2 rawDataSize: 178 --- outer select - numRows: 2 rawDataSize: 194 -explain select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc limit 10) in1 limit 10) in2 +PREHOOK: query: explain select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc limit 10) in1 limit 10) in2 PREHOOK: type: QUERY -POSTHOOK: query: -- inner select - numRows: 2 rawDataSize: 186 --- middle select - numRows: 2 rawDataSize: 178 --- outer select - numRows: 2 rawDataSize: 194 -explain select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc limit 10) in1 limit 10) in2 +POSTHOOK: query: explain select h, 11.0 from (select hell as h from (select i1, "hello" as hell from alltypes_orc limit 10) in1 limit 10) in2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1034,13 +946,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- This test is for FILTER operator where filter expression is a boolean column --- numRows: 2 rawDataSize: 8 -explain 
select bo1 from alltypes_orc where bo1 +PREHOOK: query: explain select bo1 from alltypes_orc where bo1 PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for FILTER operator where filter expression is a boolean column --- numRows: 2 rawDataSize: 8 -explain select bo1 from alltypes_orc where bo1 +POSTHOOK: query: explain select bo1 from alltypes_orc where bo1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1074,11 +982,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select bo1 from alltypes_orc where !bo1 +PREHOOK: query: explain select bo1 from alltypes_orc where !bo1 PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 0 rawDataSize: 0 -explain select bo1 from alltypes_orc where !bo1 +POSTHOOK: query: explain select bo1 from alltypes_orc where !bo1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_table.q.out b/ql/src/test/results/clientpositive/annotate_stats_table.q.out index 6db4ded..efc3c1f 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_table.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_table.q.out @@ -28,11 +28,9 @@ POSTHOOK: query: alter table emp_orc set fileformat orc POSTHOOK: type: ALTERTABLE_FILEFORMAT POSTHOOK: Input: default@emp_orc POSTHOOK: Output: default@emp_orc -PREHOOK: query: -- basicStatState: NONE colStatState: NONE -explain select * from emp_orc +PREHOOK: query: explain select * from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: NONE colStatState: NONE -explain select * from emp_orc +POSTHOOK: query: explain select * from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -69,15 +67,9 @@ POSTHOOK: Input: default@emp_staging POSTHOOK: Output: default@emp_orc POSTHOOK: Lineage: emp_orc.deptid SIMPLE [(emp_staging)emp_staging.FieldSchema(name:deptid, type:int, comment:null), ] POSTHOOK: Lineage: emp_orc.lastname 
SIMPLE [(emp_staging)emp_staging.FieldSchema(name:lastname, type:string, comment:null), ] -PREHOOK: query: -- stats are disabled. basic stats will report the file size but not raw data size. so initial statistics will be PARTIAL - --- basicStatState: PARTIAL colStatState: NONE -explain select * from emp_orc +PREHOOK: query: explain select * from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- stats are disabled. basic stats will report the file size but not raw data size. so initial statistics will be PARTIAL - --- basicStatState: PARTIAL colStatState: NONE -explain select * from emp_orc +POSTHOOK: query: explain select * from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -96,21 +88,17 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 394 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- table level analyze statistics -analyze table emp_orc compute statistics +PREHOOK: query: analyze table emp_orc compute statistics PREHOOK: type: QUERY PREHOOK: Input: default@emp_orc PREHOOK: Output: default@emp_orc -POSTHOOK: query: -- table level analyze statistics -analyze table emp_orc compute statistics +POSTHOOK: query: analyze table emp_orc compute statistics POSTHOOK: type: QUERY POSTHOOK: Input: default@emp_orc POSTHOOK: Output: default@emp_orc -PREHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from emp_orc +PREHOOK: query: explain select * from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: NONE -explain select * from emp_orc +POSTHOOK: query: explain select * from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -129,21 +117,17 @@ STAGE PLANS: Statistics: Num rows: 48 Data size: 4512 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- column level partial statistics -analyze table emp_orc compute statistics for columns deptid +PREHOOK: query: analyze table emp_orc compute statistics for columns 
deptid PREHOOK: type: QUERY PREHOOK: Input: default@emp_orc #### A masked pattern was here #### -POSTHOOK: query: -- column level partial statistics -analyze table emp_orc compute statistics for columns deptid +POSTHOOK: query: analyze table emp_orc compute statistics for columns deptid POSTHOOK: type: QUERY POSTHOOK: Input: default@emp_orc #### A masked pattern was here #### -PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain select * from emp_orc +PREHOOK: query: explain select * from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain select * from emp_orc +POSTHOOK: query: explain select * from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -162,13 +146,9 @@ STAGE PLANS: Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- all selected columns have statistics --- basicStatState: COMPLETE colStatState: COMPLETE -explain select deptid from emp_orc +PREHOOK: query: explain select deptid from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- all selected columns have statistics --- basicStatState: COMPLETE colStatState: COMPLETE -explain select deptid from emp_orc +POSTHOOK: query: explain select deptid from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -187,21 +167,17 @@ STAGE PLANS: Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- column level complete statistics -analyze table emp_orc compute statistics for columns lastname,deptid +PREHOOK: query: analyze table emp_orc compute statistics for columns lastname,deptid PREHOOK: type: QUERY PREHOOK: Input: default@emp_orc #### A masked pattern was here #### -POSTHOOK: query: -- column level complete statistics -analyze table emp_orc compute statistics for columns lastname,deptid +POSTHOOK: query: analyze table emp_orc compute statistics for columns 
lastname,deptid POSTHOOK: type: QUERY POSTHOOK: Input: default@emp_orc #### A masked pattern was here #### -PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select * from emp_orc +PREHOOK: query: explain select * from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select * from emp_orc +POSTHOOK: query: explain select * from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -220,11 +196,9 @@ STAGE PLANS: Statistics: Num rows: 48 Data size: 4560 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select lastname from emp_orc +PREHOOK: query: explain select lastname from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select lastname from emp_orc +POSTHOOK: query: explain select lastname from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -243,11 +217,9 @@ STAGE PLANS: Statistics: Num rows: 48 Data size: 4368 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select deptid from emp_orc +PREHOOK: query: explain select deptid from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select deptid from emp_orc +POSTHOOK: query: explain select deptid from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -266,11 +238,9 @@ STAGE PLANS: Statistics: Num rows: 48 Data size: 192 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select lastname,deptid from emp_orc +PREHOOK: query: explain select lastname,deptid from emp_orc PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: COMPLETE -explain select lastname,deptid from emp_orc 
+POSTHOOK: query: explain select lastname,deptid from emp_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/annotate_stats_union.q.out b/ql/src/test/results/clientpositive/annotate_stats_union.q.out index c03aa84..059f261 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_union.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_union.q.out @@ -60,11 +60,9 @@ POSTHOOK: query: analyze table loc_orc compute statistics for columns state,loci POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- numRows: 8 rawDataSize: 688 -explain select state from loc_orc +PREHOOK: query: explain select state from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 8 rawDataSize: 688 -explain select state from loc_orc +POSTHOOK: query: explain select state from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -83,11 +81,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 16 rawDataSize: 1376 -explain select * from (select state from loc_orc union all select state from loc_orc) tmp +PREHOOK: query: explain select * from (select state from loc_orc union all select state from loc_orc) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 16 rawDataSize: 1376 -explain select * from (select state from loc_orc union all select state from loc_orc) tmp +POSTHOOK: query: explain select * from (select state from loc_orc union all select state from loc_orc) tmp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -136,11 +132,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +POSTHOOK: query: 
explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -159,11 +153,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 16 rawDataSize: 1592 -explain select * from (select * from loc_orc union all select * from loc_orc) tmp +PREHOOK: query: explain select * from (select * from loc_orc union all select * from loc_orc) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 16 rawDataSize: 1592 -explain select * from (select * from loc_orc union all select * from loc_orc) tmp +POSTHOOK: query: explain select * from (select * from loc_orc union all select * from loc_orc) tmp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -302,11 +294,9 @@ POSTHOOK: query: analyze table loc_orc compute statistics for columns state,loci POSTHOOK: type: QUERY POSTHOOK: Input: test@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- numRows: 16 rawDataSize: 1376 -explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp +PREHOOK: query: explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 16 rawDataSize: 1376 -explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp +POSTHOOK: query: explain select * from (select state from default.loc_orc union all select state from test.loc_orc) temp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -355,11 +345,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- numRows: 16 rawDataSize: 1376 -explain select * from (select state from test.loc_staging union all select state from test.loc_orc) temp +PREHOOK: query: explain select * from (select state from test.loc_staging union all select state from test.loc_orc) temp PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 16 
rawDataSize: 1376 -explain select * from (select state from test.loc_staging union all select state from test.loc_orc) temp +POSTHOOK: query: explain select * from (select state from test.loc_staging union all select state from test.loc_orc) temp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out b/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out index dd12b02..769bce0 100644 --- a/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out +++ b/ql/src/test/results/clientpositive/ansi_sql_arithmetic.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- With ansi sql arithmetic enabled, int / int => exact numeric type -explain select cast(key as int) / cast(key as int) from src limit 1 +PREHOOK: query: explain select cast(key as int) / cast(key as int) from src limit 1 PREHOOK: type: QUERY -POSTHOOK: query: -- With ansi sql arithmetic enabled, int / int => exact numeric type -explain select cast(key as int) / cast(key as int) from src limit 1 +POSTHOOK: query: explain select cast(key as int) / cast(key as int) from src limit 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -45,11 +43,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 1.00000000000 -PREHOOK: query: -- With ansi sql arithmetic disabled, int / int => double -explain select cast(key as int) / cast(key as int) from src limit 1 +PREHOOK: query: explain select cast(key as int) / cast(key as int) from src limit 1 PREHOOK: type: QUERY -POSTHOOK: query: -- With ansi sql arithmetic disabled, int / int => double -explain select cast(key as int) / cast(key as int) from src limit 1 +POSTHOOK: query: explain select cast(key as int) / cast(key as int) from src limit 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/archive_multi.q.out b/ql/src/test/results/clientpositive/archive_multi.q.out index 
38f3f1a..5222c33 100644 --- a/ql/src/test/results/clientpositive/archive_multi.q.out +++ b/ql/src/test/results/clientpositive/archive_multi.q.out @@ -86,18 +86,14 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Output: ac_test@tstsrcpart@ds=2008-04-09/hr=12 POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tstsrcpart PARTITION(ds=2008-04-09,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19) - -SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col +PREHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2 PREHOOK: type: QUERY PREHOOK: Input: ac_test@tstsrcpart PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=11 PREHOOK: Input: ac_test@tstsrcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19) - -SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col +POSTHOOK: query: SELECT SUM(hash(col)) FROM (SELECT transform(*) using 'tr "\t" "_"' AS col FROM (SELECT * FROM ac_test.tstsrcpart WHERE ds='2008-04-08') subq1) subq2 POSTHOOK: type: QUERY POSTHOOK: Input: ac_test@tstsrcpart diff --git a/ql/src/test/results/clientpositive/authorization_1.q.out b/ql/src/test/results/clientpositive/authorization_1.q.out index 5d73485..90cca5a 100644 --- a/ql/src/test/results/clientpositive/authorization_1.q.out +++ b/ql/src/test/results/clientpositive/authorization_1.q.out @@ -1,27 +1,19 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: 
database:default PREHOOK: Output: default@src_autho_test -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table src_autho_test as select * from src +POSTHOOK: query: create table src_autho_test as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@src_autho_test POSTHOOK: Lineage: src_autho_test.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: src_autho_test.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +PREHOOK: query: grant select on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: show grant user hive_test_user on table src_autho_test @@ -75,14 +67,10 @@ PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) PREHOOK: type: SHOW_GRANT POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: --column grant to user - -grant select(key) on table src_autho_test to user hive_test_user +PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --column grant to user - -grant select(key) on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select(key) on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: show grant user hive_test_user on table src_autho_test @@ -136,14 +124,10 
@@ PREHOOK: query: show grant user hive_test_user on table src_autho_test(key) PREHOOK: type: SHOW_GRANT POSTHOOK: query: show grant user hive_test_user on table src_autho_test(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: --table grant to group - -grant select on table src_autho_test to group hive_test_group1 +PREHOOK: query: grant select on table src_autho_test to group hive_test_group1 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to group - -grant select on table src_autho_test to group hive_test_group1 +POSTHOOK: query: grant select on table src_autho_test to group hive_test_group1 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: show grant group hive_test_group1 on table src_autho_test @@ -197,14 +181,10 @@ PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) PREHOOK: type: SHOW_GRANT POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: --column grant to group - -grant select(key) on table src_autho_test to group hive_test_group1 +PREHOOK: query: grant select(key) on table src_autho_test to group hive_test_group1 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --column grant to group - -grant select(key) on table src_autho_test to group hive_test_group1 +POSTHOOK: query: grant select(key) on table src_autho_test to group hive_test_group1 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: show grant group hive_test_group1 on table src_autho_test @@ -258,11 +238,9 @@ PREHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) PREHOOK: type: SHOW_GRANT POSTHOOK: query: show grant group hive_test_group1 on table src_autho_test(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: --role -create role sRc_roLE +PREHOOK: query: create role sRc_roLE PREHOOK: type: CREATEROLE 
-POSTHOOK: query: --role -create role sRc_roLE +POSTHOOK: query: create role sRc_roLE POSTHOOK: type: CREATEROLE PREHOOK: query: grant role sRc_roLE to user hive_test_user PREHOOK: type: GRANT_ROLE @@ -274,14 +252,10 @@ POSTHOOK: query: show role grant user hive_test_user POSTHOOK: type: SHOW_ROLE_GRANT public false -1 sRc_roLE false -1 hive_test_user -PREHOOK: query: --column grant to role - -grant select(key) on table src_autho_test to role sRc_roLE +PREHOOK: query: grant select(key) on table src_autho_test to role sRc_roLE PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --column grant to role - -grant select(key) on table src_autho_test to role sRc_roLE +POSTHOOK: query: grant select(key) on table src_autho_test to role sRc_roLE POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: show grant role sRc_roLE on table src_autho_test @@ -327,14 +301,10 @@ PREHOOK: Output: default@src_autho_test POSTHOOK: query: revoke select(key) on table src_autho_test from role sRc_roLE POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@src_autho_test -PREHOOK: query: --table grant to role - -grant select on table src_autho_test to role sRc_roLE +PREHOOK: query: grant select on table src_autho_test to role sRc_roLE PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to role - -grant select on table src_autho_test to role sRc_roLE +POSTHOOK: query: grant select on table src_autho_test to role sRc_roLE POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: select key from src_autho_test order by key limit 20 @@ -380,11 +350,9 @@ PREHOOK: Output: default@src_autho_test POSTHOOK: query: revoke select on table src_autho_test from role sRc_roLE POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@src_autho_test -PREHOOK: query: -- drop role -drop role sRc_roLE +PREHOOK: query: drop role sRc_roLE PREHOOK: type: 
DROPROLE -POSTHOOK: query: -- drop role -drop role sRc_roLE +POSTHOOK: query: drop role sRc_roLE POSTHOOK: type: DROPROLE PREHOOK: query: drop table src_autho_test PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out b/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out index 2315fd4..18f8d44 100644 --- a/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out +++ b/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out @@ -6,13 +6,11 @@ POSTHOOK: query: create table src_autho_test (key STRING, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_autho_test -PREHOOK: query: --select dummy table -select 1 +PREHOOK: query: select 1 PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --select dummy table -select 1 +POSTHOOK: query: select 1 POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### @@ -21,14 +19,10 @@ PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES -PREHOOK: query: --table grant to user - -grant select on table src_autho_test to user user_sauth +PREHOOK: query: grant select on table src_autho_test to user user_sauth PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select on table src_autho_test to user user_sauth +POSTHOOK: query: grant select on table src_autho_test to user user_sauth POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: show grant user user_sauth on table src_autho_test @@ -46,11 +40,9 @@ PREHOOK: query: show grant user user_sauth on table src_autho_test PREHOOK: type: SHOW_GRANT POSTHOOK: query: show grant user user_sauth on table src_autho_test POSTHOOK: type: SHOW_GRANT -PREHOOK: query: --role -create role src_role 
+PREHOOK: query: create role src_role PREHOOK: type: CREATEROLE -POSTHOOK: query: --role -create role src_role +POSTHOOK: query: create role src_role POSTHOOK: type: CREATEROLE PREHOOK: query: grant role src_role to user user_sauth PREHOOK: type: GRANT_ROLE @@ -62,16 +54,10 @@ POSTHOOK: query: show role grant user user_sauth POSTHOOK: type: SHOW_ROLE_GRANT public false -1 src_role false -1 hive_admin_user -PREHOOK: query: --table grant to role - --- also verify case insesitive behavior of role name -grant select on table src_autho_test to role Src_ROle +PREHOOK: query: grant select on table src_autho_test to role Src_ROle PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to role - --- also verify case insesitive behavior of role name -grant select on table src_autho_test to role Src_ROle +POSTHOOK: query: grant select on table src_autho_test to role Src_ROle POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: show grant role src_role on table src_autho_test @@ -85,11 +71,9 @@ PREHOOK: Output: default@src_autho_test POSTHOOK: query: revoke select on table src_autho_test from role src_rolE POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@src_autho_test -PREHOOK: query: -- drop role -drop role SRc_role +PREHOOK: query: drop role SRc_role PREHOOK: type: DROPROLE -POSTHOOK: query: -- drop role -drop role SRc_role +POSTHOOK: query: drop role SRc_role POSTHOOK: type: DROPROLE PREHOOK: query: drop table src_autho_test PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/authorization_3.q.out b/ql/src/test/results/clientpositive/authorization_3.q.out index 14c1466..181a512 100644 --- a/ql/src/test/results/clientpositive/authorization_3.q.out +++ b/ql/src/test/results/clientpositive/authorization_3.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test as 
select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src_autho_test -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table src_autho_test as select * from src +POSTHOOK: query: create table src_autho_test as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/authorization_4.q.out b/ql/src/test/results/clientpositive/authorization_4.q.out index 7bf00cd..aade134 100644 --- a/ql/src/test/results/clientpositive/authorization_4.q.out +++ b/ql/src/test/results/clientpositive/authorization_4.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -create table src_autho_test as select * from src +PREHOOK: query: create table src_autho_test as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src_autho_test -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table src_autho_test as select * from src +POSTHOOK: query: create table src_autho_test as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/authorization_5.q.out b/ql/src/test/results/clientpositive/authorization_5.q.out index 7917dba..1bfbcc7 100644 --- a/ql/src/test/results/clientpositive/authorization_5.q.out +++ b/ql/src/test/results/clientpositive/authorization_5.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' +PREHOOK: query: CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -POSTHOOK: query: -- SORT_BEFORE_DIFF - -CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' +POSTHOOK: query: CREATE DATABASE IF NOT EXISTS test_db COMMENT 
'Hive test database' POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: SHOW DATABASES diff --git a/ql/src/test/results/clientpositive/authorization_6.q.out b/ql/src/test/results/clientpositive/authorization_6.q.out index bfe9f76..cbd218f 100644 --- a/ql/src/test/results/clientpositive/authorization_6.q.out +++ b/ql/src/test/results/clientpositive/authorization_6.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -create table src_auth_tmp as select * from src +PREHOOK: query: create table src_auth_tmp as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src_auth_tmp -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table src_auth_tmp as select * from src +POSTHOOK: query: create table src_auth_tmp as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -36,12 +32,10 @@ PREHOOK: Output: default@src_auth_tmp POSTHOOK: query: grant select on table src_auth_tmp to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_auth_tmp -PREHOOK: query: -- column grant to user -grant Create on table authorization_part to user hive_test_user +PREHOOK: query: grant Create on table authorization_part to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@authorization_part -POSTHOOK: query: -- column grant to user -grant Create on table authorization_part to user hive_test_user +POSTHOOK: query: grant Create on table authorization_part to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@authorization_part PREHOOK: query: grant Update on table authorization_part to user hive_test_user diff --git a/ql/src/test/results/clientpositive/authorization_9.q.out b/ql/src/test/results/clientpositive/authorization_9.q.out index 6bb8ecb..39e0a56 100644 --- a/ql/src/test/results/clientpositive/authorization_9.q.out 
+++ b/ql/src/test/results/clientpositive/authorization_9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -create table dummy (key string, value string) +PREHOOK: query: create table dummy (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dummy -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table dummy (key string, value string) +POSTHOOK: query: create table dummy (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dummy diff --git a/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out b/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out index 53619c3..c5b04e6 100644 --- a/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out +++ b/ql/src/test/results/clientpositive/authorization_admin_almighty1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- actions from admin should work as if admin has all privileges - -create table t1(i int) +PREHOOK: query: create table t1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- actions from admin should work as if admin has all privileges - -create table t1(i int) +POSTHOOK: query: create table t1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/authorization_admin_almighty2.q.out b/ql/src/test/results/clientpositive/authorization_admin_almighty2.q.out index be6f12a..8129a1f 100644 --- a/ql/src/test/results/clientpositive/authorization_admin_almighty2.q.out +++ b/ql/src/test/results/clientpositive/authorization_admin_almighty2.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- test commands such as dfs,add,delete,compile allowed only by admin user, after following statement -use default +PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default -POSTHOOK: 
query: -- test commands such as dfs,add,delete,compile allowed only by admin user, after following statement -use default +POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default PREHOOK: query: set role admin diff --git a/ql/src/test/results/clientpositive/authorization_alter_table_exchange_partition.q.out b/ql/src/test/results/clientpositive/authorization_alter_table_exchange_partition.q.out index 22b66d1..e838056 100644 --- a/ql/src/test/results/clientpositive/authorization_alter_table_exchange_partition.q.out +++ b/ql/src/test/results/clientpositive/authorization_alter_table_exchange_partition.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- create a table owned by user1 - -create table exchange_partition_test_1(a int) partitioned by (b int) +PREHOOK: query: create table exchange_partition_test_1(a int) partitioned by (b int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@exchange_partition_test_1 -POSTHOOK: query: -- create a table owned by user1 - -create table exchange_partition_test_1(a int) partitioned by (b int) +POSTHOOK: query: create table exchange_partition_test_1(a int) partitioned by (b int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@exchange_partition_test_1 @@ -22,27 +18,19 @@ PREHOOK: query: set role admin PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role admin POSTHOOK: type: SHOW_ROLES -PREHOOK: query: -- add data to exchange_partition_test_1 - -insert overwrite table exchange_partition_test_1 partition (b=1) select key from src +PREHOOK: query: insert overwrite table exchange_partition_test_1 partition (b=1) select key from src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@exchange_partition_test_1@b=1 -POSTHOOK: query: -- add data to exchange_partition_test_1 - -insert overwrite table exchange_partition_test_1 partition (b=1) select key from src +POSTHOOK: query: insert overwrite table 
exchange_partition_test_1 partition (b=1) select key from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@exchange_partition_test_1@b=1 POSTHOOK: Lineage: exchange_partition_test_1 PARTITION(b=1).a EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- grant select, delete privileges to user2 on exchange_partition_test_1 - -grant select, delete on exchange_partition_test_1 to user user2 +PREHOOK: query: grant select, delete on exchange_partition_test_1 to user user2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@exchange_partition_test_1 -POSTHOOK: query: -- grant select, delete privileges to user2 on exchange_partition_test_1 - -grant select, delete on exchange_partition_test_1 to user user2 +POSTHOOK: query: grant select, delete on exchange_partition_test_1 to user user2 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@exchange_partition_test_1 PREHOOK: query: set role all @@ -55,19 +43,11 @@ POSTHOOK: query: show grant user user2 on table exchange_partition_test_1 POSTHOOK: type: SHOW_GRANT default exchange_partition_test_1 user2 USER DELETE false -1 hive_admin_user default exchange_partition_test_1 user2 USER SELECT false -1 hive_admin_user -PREHOOK: query: -- switch user - --- create a table owned by user2 (as a result user2 will have insert privilege) - -create table exchange_partition_test_2(a int) partitioned by (b int) +PREHOOK: query: create table exchange_partition_test_2(a int) partitioned by (b int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@exchange_partition_test_2 -POSTHOOK: query: -- switch user - --- create a table owned by user2 (as a result user2 will have insert privilege) - -create table exchange_partition_test_2(a int) partitioned by (b int) +POSTHOOK: query: create table exchange_partition_test_2(a int) partitioned by (b int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@exchange_partition_test_2 @@ -79,13 +59,9 @@ default exchange_partition_test_2 user2 USER DELETE true -1 user1 default exchange_partition_test_2 user2 USER INSERT true -1 user1 default exchange_partition_test_2 user2 USER SELECT true -1 user1 default exchange_partition_test_2 user2 USER UPDATE true -1 user1 -PREHOOK: query: -- execute alter table exchange partition to add data to exchange_partition_test_2 - -explain authorization alter table exchange_partition_test_2 exchange partition (b=1) with table exchange_partition_test_1 +PREHOOK: query: explain authorization alter table exchange_partition_test_2 exchange partition (b=1) with table exchange_partition_test_1 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION -POSTHOOK: query: -- execute alter table exchange partition to add data to exchange_partition_test_2 - -explain authorization alter table exchange_partition_test_2 exchange partition (b=1) with table exchange_partition_test_1 +POSTHOOK: query: explain authorization alter table exchange_partition_test_2 exchange partition (b=1) with table exchange_partition_test_1 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION INPUTS: default@exchange_partition_test_1 diff --git a/ql/src/test/results/clientpositive/authorization_cli_createtab.q.out b/ql/src/test/results/clientpositive/authorization_cli_createtab.q.out index a75d64b..79456ce 100644 --- a/ql/src/test/results/clientpositive/authorization_cli_createtab.q.out +++ b/ql/src/test/results/clientpositive/authorization_cli_createtab.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- verify that sql std auth can be set as the authorizer with hive cli --- and that the create table/view result in correct permissions (suitable for sql std auth mode) - -create table t_cli(i int) +PREHOOK: query: create table t_cli(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_cli -POSTHOOK: query: -- verify that sql std auth can be set as the authorizer with hive cli --- and that the create 
table/view result in correct permissions (suitable for sql std auth mode) - -create table t_cli(i int) +POSTHOOK: query: create table t_cli(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_cli diff --git a/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out b/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out index 1375dfa..7c8909a 100644 --- a/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out +++ b/ql/src/test/results/clientpositive/authorization_cli_createtab_noauthzapi.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- verify that sql std auth can be set as the authorizer with hive cli, while metastore authorization api calls are disabled (for cli) - -create table t_cli(i int) +PREHOOK: query: create table t_cli(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_cli -POSTHOOK: query: -- verify that sql std auth can be set as the authorizer with hive cli, while metastore authorization api calls are disabled (for cli) - -create table t_cli(i int) +POSTHOOK: query: create table t_cli(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_cli diff --git a/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out b/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out index ded382b..92e7b2b 100644 --- a/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out +++ b/ql/src/test/results/clientpositive/authorization_cli_nonsql.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- Verify that dfs,compile,add,delete commands can be run from hive cli, and no authorization checks happen when auth is diabled - -use default +PREHOOK: query: use default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default -POSTHOOK: query: -- Verify that dfs,compile,add,delete commands can be run from hive cli, and no authorization checks happen when auth 
is diabled - -use default +POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default PREHOOK: query: create table a_table1(a int, b int) diff --git a/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out b/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out index a70b2bc..78482f6 100644 --- a/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out +++ b/ql/src/test/results/clientpositive/authorization_cli_stdconfigauth.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- verify that SQLStdConfOnlyAuthorizerFactory as the authorizer factory with hive cli, with hive.security.authorization.enabled=true --- authorization verification would be just no-op - -create table t_cli(i int) +PREHOOK: query: create table t_cli(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_cli -POSTHOOK: query: -- verify that SQLStdConfOnlyAuthorizerFactory as the authorizer factory with hive cli, with hive.security.authorization.enabled=true --- authorization verification would be just no-op - -create table t_cli(i int) +POSTHOOK: query: create table t_cli(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_cli diff --git a/ql/src/test/results/clientpositive/authorization_create_func1.q.out b/ql/src/test/results/clientpositive/authorization_create_func1.q.out index 120dacc..d7de21a 100644 --- a/ql/src/test/results/clientpositive/authorization_create_func1.q.out +++ b/ql/src/test/results/clientpositive/authorization_create_func1.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- admin required for create function -set role ADMIN +PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES -POSTHOOK: query: -- admin required for create function -set role ADMIN +POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES PREHOOK: query: create temporary function temp_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii' PREHOOK: type: 
CREATEFUNCTION diff --git a/ql/src/test/results/clientpositive/authorization_create_macro1.q.out b/ql/src/test/results/clientpositive/authorization_create_macro1.q.out index 9932cdd..f7fa8ca 100644 --- a/ql/src/test/results/clientpositive/authorization_create_macro1.q.out +++ b/ql/src/test/results/clientpositive/authorization_create_macro1.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- admin required for create macro -set role ADMIN +PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES -POSTHOOK: query: -- admin required for create macro -set role ADMIN +POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES PREHOOK: query: create temporary macro mymacro1(x double) x * x PREHOOK: type: CREATEMACRO diff --git a/ql/src/test/results/clientpositive/authorization_create_table_owner_privs.q.out b/ql/src/test/results/clientpositive/authorization_create_table_owner_privs.q.out index 7130db8..df7069e 100644 --- a/ql/src/test/results/clientpositive/authorization_create_table_owner_privs.q.out +++ b/ql/src/test/results/clientpositive/authorization_create_table_owner_privs.q.out @@ -6,13 +6,9 @@ POSTHOOK: query: create table create_table_creator_priv_test(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@create_table_creator_priv_test -PREHOOK: query: -- all privileges should have been set for user - -show grant user user1 on table create_table_creator_priv_test +PREHOOK: query: show grant user user1 on table create_table_creator_priv_test PREHOOK: type: SHOW_GRANT -POSTHOOK: query: -- all privileges should have been set for user - -show grant user user1 on table create_table_creator_priv_test +POSTHOOK: query: show grant user user1 on table create_table_creator_priv_test POSTHOOK: type: SHOW_GRANT default create_table_creator_priv_test user1 USER DELETE true -1 user1 default create_table_creator_priv_test user1 USER INSERT true -1 user1 diff --git 
a/ql/src/test/results/clientpositive/authorization_default_create_table_owner_privs.q.out b/ql/src/test/results/clientpositive/authorization_default_create_table_owner_privs.q.out index eaa037e..5b5a99f 100644 --- a/ql/src/test/results/clientpositive/authorization_default_create_table_owner_privs.q.out +++ b/ql/src/test/results/clientpositive/authorization_default_create_table_owner_privs.q.out @@ -6,10 +6,8 @@ POSTHOOK: query: create table default_auth_table_creator_priv_test(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@default_auth_table_creator_priv_test -#### A masked pattern was here #### -show grant on table default_auth_table_creator_priv_test +PREHOOK: query: show grant on table default_auth_table_creator_priv_test PREHOOK: type: SHOW_GRANT -#### A masked pattern was here #### -show grant on table default_auth_table_creator_priv_test +POSTHOOK: query: show grant on table default_auth_table_creator_priv_test POSTHOOK: type: SHOW_GRANT default default_auth_table_creator_priv_test hive_test_user USER ALL true -1 hive_test_user diff --git a/ql/src/test/results/clientpositive/authorization_delete.q.out b/ql/src/test/results/clientpositive/authorization_delete.q.out index 260b9a4..7e2192b 100644 --- a/ql/src/test/results/clientpositive/authorization_delete.q.out +++ b/ql/src/test/results/clientpositive/authorization_delete.q.out @@ -1,21 +1,15 @@ -PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) - -CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_auth_del -POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) 
- -CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: CREATE TABLE t_auth_del(i int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_auth_del -PREHOOK: query: -- grant update privilege to another user -GRANT DELETE ON t_auth_del TO USER userWIns +PREHOOK: query: GRANT DELETE ON t_auth_del TO USER userWIns PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t_auth_del -POSTHOOK: query: -- grant update privilege to another user -GRANT DELETE ON t_auth_del TO USER userWIns +POSTHOOK: query: GRANT DELETE ON t_auth_del TO USER userWIns POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t_auth_del PREHOOK: query: GRANT SELECT ON t_auth_del TO USER userWIns diff --git a/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out b/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out index 745a8ae..63f2ab4 100644 --- a/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out +++ b/ql/src/test/results/clientpositive/authorization_grant_option_role.q.out @@ -18,33 +18,25 @@ POSTHOOK: query: CREATE TABLE t1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 -PREHOOK: query: -- all privileges should have been set for user - -GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION +PREHOOK: query: GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t1 -POSTHOOK: query: -- all privileges should have been set for user - -GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION +POSTHOOK: query: GRANT ALL ON t1 TO ROLE r1 WITH GRANT OPTION POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t1 -PREHOOK: query: -- check if user belong to role r1 can grant privileges to others -GRANT ALL ON t1 TO USER user3 +PREHOOK: query: GRANT ALL ON 
t1 TO USER user3 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t1 -POSTHOOK: query: -- check if user belong to role r1 can grant privileges to others -GRANT ALL ON t1 TO USER user3 +POSTHOOK: query: GRANT ALL ON t1 TO USER user3 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t1 PREHOOK: query: set role admin PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role admin POSTHOOK: type: SHOW_ROLES -PREHOOK: query: -- check privileges on table -show grant on table t1 +PREHOOK: query: show grant on table t1 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: -- check privileges on table -show grant on table t1 +POSTHOOK: query: show grant on table t1 POSTHOOK: type: SHOW_GRANT default t1 r1 ROLE DELETE true -1 user1 default t1 r1 ROLE INSERT true -1 user1 @@ -58,11 +50,9 @@ default t1 user3 USER DELETE false -1 r1user default t1 user3 USER INSERT false -1 r1user default t1 user3 USER SELECT false -1 r1user default t1 user3 USER UPDATE false -1 r1user -PREHOOK: query: -- check if drop role removes privileges for that role -drop role r1 +PREHOOK: query: drop role r1 PREHOOK: type: DROPROLE -POSTHOOK: query: -- check if drop role removes privileges for that role -drop role r1 +POSTHOOK: query: drop role r1 POSTHOOK: type: DROPROLE PREHOOK: query: show grant on table t1 PREHOOK: type: SHOW_GRANT diff --git a/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out b/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out index 1b3cfdc..c35de68 100644 --- a/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out +++ b/ql/src/test/results/clientpositive/authorization_grant_public_role.q.out @@ -1,23 +1,15 @@ -PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) 
- -CREATE TABLE t_gpr1(i int) +PREHOOK: query: CREATE TABLE t_gpr1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_gpr1 -POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) - -CREATE TABLE t_gpr1(i int) +POSTHOOK: query: CREATE TABLE t_gpr1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_gpr1 -PREHOOK: query: -- all privileges should have been set for user - -GRANT ALL ON t_gpr1 TO ROLE pubLic +PREHOOK: query: GRANT ALL ON t_gpr1 TO ROLE pubLic PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t_gpr1 -POSTHOOK: query: -- all privileges should have been set for user - -GRANT ALL ON t_gpr1 TO ROLE pubLic +POSTHOOK: query: GRANT ALL ON t_gpr1 TO ROLE pubLic POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t_gpr1 PREHOOK: query: SHOW GRANT USER user1 ON TABLE t_gpr1 @@ -41,12 +33,10 @@ PREHOOK: type: SHOW_ROLES POSTHOOK: query: SHOW CURRENT ROLES POSTHOOK: type: SHOW_ROLES public -PREHOOK: query: -- user2 should be able to do a describe table, as pubic is in the current roles -DESC t_gpr1 +PREHOOK: query: DESC t_gpr1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@t_gpr1 -POSTHOOK: query: -- user2 should be able to do a describe table, as pubic is in the current roles -DESC t_gpr1 +POSTHOOK: query: DESC t_gpr1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@t_gpr1 i int diff --git a/ql/src/test/results/clientpositive/authorization_grant_table_priv.q.out b/ql/src/test/results/clientpositive/authorization_grant_table_priv.q.out index 58ede72..19a845a 100644 --- a/ql/src/test/results/clientpositive/authorization_grant_table_priv.q.out +++ b/ql/src/test/results/clientpositive/authorization_grant_table_priv.q.out @@ -1,25 +1,15 @@ -PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) 
- -CREATE TABLE table_priv1(i int) +PREHOOK: query: CREATE TABLE table_priv1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table_priv1 -POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) - -CREATE TABLE table_priv1(i int) +POSTHOOK: query: CREATE TABLE table_priv1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@table_priv1 -PREHOOK: query: -- all privileges should have been set for user - --- grant insert privilege to another user -GRANT INSERT ON table_priv1 TO USER user2 +PREHOOK: query: GRANT INSERT ON table_priv1 TO USER user2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv1 -POSTHOOK: query: -- all privileges should have been set for user - --- grant insert privilege to another user -GRANT INSERT ON table_priv1 TO USER user2 +POSTHOOK: query: GRANT INSERT ON table_priv1 TO USER user2 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv1 PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv1 @@ -27,12 +17,10 @@ PREHOOK: type: SHOW_GRANT POSTHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv1 POSTHOOK: type: SHOW_GRANT default table_priv1 user2 USER INSERT false -1 user1 -PREHOOK: query: -- grant select privilege to another user with grant -GRANT SELECT ON table_priv1 TO USER user2 with grant option +PREHOOK: query: GRANT SELECT ON table_priv1 TO USER user2 with grant option PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv1 -POSTHOOK: query: -- grant select privilege to another user with grant -GRANT SELECT ON table_priv1 TO USER user2 with grant option +POSTHOOK: query: GRANT SELECT ON table_priv1 TO USER user2 with grant option POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv1 PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv1 @@ -41,14 +29,10 @@ POSTHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv1 POSTHOOK: type: 
SHOW_GRANT default table_priv1 user2 USER INSERT false -1 user1 default table_priv1 user2 USER SELECT true -1 user1 -PREHOOK: query: -- changed to other user - user2 --- grant permissions to another user as user2 -GRANT SELECT ON table_priv1 TO USER user3 with grant option +PREHOOK: query: GRANT SELECT ON table_priv1 TO USER user3 with grant option PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv1 -POSTHOOK: query: -- changed to other user - user2 --- grant permissions to another user as user2 -GRANT SELECT ON table_priv1 TO USER user3 with grant option +POSTHOOK: query: GRANT SELECT ON table_priv1 TO USER user3 with grant option POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv1 PREHOOK: query: SHOW GRANT USER user3 ON TABLE table_priv1 @@ -56,14 +40,10 @@ PREHOOK: type: SHOW_GRANT POSTHOOK: query: SHOW GRANT USER user3 ON TABLE table_priv1 POSTHOOK: type: SHOW_GRANT default table_priv1 user3 USER SELECT true -1 user2 -PREHOOK: query: -- change to other user - user3 --- grant permissions to another user as user3 -GRANT SELECT ON table_priv1 TO USER user4 with grant option +PREHOOK: query: GRANT SELECT ON table_priv1 TO USER user4 with grant option PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv1 -POSTHOOK: query: -- change to other user - user3 --- grant permissions to another user as user3 -GRANT SELECT ON table_priv1 TO USER user4 with grant option +POSTHOOK: query: GRANT SELECT ON table_priv1 TO USER user4 with grant option POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv1 PREHOOK: query: SHOW GRANT USER user4 ON TABLE table_priv1 @@ -71,16 +51,10 @@ PREHOOK: type: SHOW_GRANT POSTHOOK: query: SHOW GRANT USER user4 ON TABLE table_priv1 POSTHOOK: type: SHOW_GRANT default table_priv1 user4 USER SELECT true -1 user3 -#### A masked pattern was here #### - --- grant all with grant to user22 -GRANT ALL ON table_priv1 TO USER user22 with grant option +PREHOOK: query: GRANT ALL ON table_priv1 TO 
USER user22 with grant option PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv1 -#### A masked pattern was here #### - --- grant all with grant to user22 -GRANT ALL ON table_priv1 TO USER user22 with grant option +POSTHOOK: query: GRANT ALL ON table_priv1 TO USER user22 with grant option POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv1 PREHOOK: query: SHOW GRANT USER user22 ON TABLE table_priv1 @@ -91,12 +65,10 @@ default table_priv1 user22 USER DELETE true -1 user1 default table_priv1 user22 USER INSERT true -1 user1 default table_priv1 user22 USER SELECT true -1 user1 default table_priv1 user22 USER UPDATE true -1 user1 -PREHOOK: query: -- grant all without grant to user33 -GRANT ALL ON table_priv1 TO USER user33 with grant option +PREHOOK: query: GRANT ALL ON table_priv1 TO USER user33 with grant option PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv1 -POSTHOOK: query: -- grant all without grant to user33 -GRANT ALL ON table_priv1 TO USER user33 with grant option +POSTHOOK: query: GRANT ALL ON table_priv1 TO USER user33 with grant option POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv1 PREHOOK: query: SHOW GRANT USER user33 ON TABLE table_priv1 diff --git a/ql/src/test/results/clientpositive/authorization_insert.q.out b/ql/src/test/results/clientpositive/authorization_insert.q.out index d49e6c2..ad11c8c 100644 --- a/ql/src/test/results/clientpositive/authorization_insert.q.out +++ b/ql/src/test/results/clientpositive/authorization_insert.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) - -CREATE TABLE t_auth_ins(i int) +PREHOOK: query: CREATE TABLE t_auth_ins(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_auth_ins -POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) 
- -CREATE TABLE t_auth_ins(i int) +POSTHOOK: query: CREATE TABLE t_auth_ins(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_auth_ins @@ -24,12 +20,10 @@ PREHOOK: Output: default@t_select POSTHOOK: query: GRANT ALL ON TABLE t_select TO ROLE public POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t_select -PREHOOK: query: -- grant insert privilege to another user -GRANT INSERT ON t_auth_ins TO USER userWIns +PREHOOK: query: GRANT INSERT ON t_auth_ins TO USER userWIns PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t_auth_ins -POSTHOOK: query: -- grant insert privilege to another user -GRANT INSERT ON t_auth_ins TO USER userWIns +POSTHOOK: query: GRANT INSERT ON t_auth_ins TO USER userWIns POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t_auth_ins PREHOOK: query: GRANT INSERT,DELETE ON t_auth_ins TO USER userWInsAndDel diff --git a/ql/src/test/results/clientpositive/authorization_load.q.out b/ql/src/test/results/clientpositive/authorization_load.q.out index 513ac53..b87ea95 100644 --- a/ql/src/test/results/clientpositive/authorization_load.q.out +++ b/ql/src/test/results/clientpositive/authorization_load.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- the main goal of these tests is to run a simple load and a load with regex, while being in the scope of SQLStdHiveAuthorizer - -create table t_auth_load(key string, value string) stored as textfile +PREHOOK: query: create table t_auth_load(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_auth_load -POSTHOOK: query: -- the main goal of these tests is to run a simple load and a load with regex, while being in the scope of SQLStdHiveAuthorizer - -create table t_auth_load(key string, value string) stored as textfile +POSTHOOK: query: create table t_auth_load(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
POSTHOOK: Output: default@t_auth_load @@ -116,13 +112,11 @@ key string value string #### A masked pattern was here #### -PREHOOK: query: -- the following two selects should be identical -select count(*) from t_auth_load +PREHOOK: query: select count(*) from t_auth_load PREHOOK: type: QUERY PREHOOK: Input: default@t_auth_load #### A masked pattern was here #### -POSTHOOK: query: -- the following two selects should be identical -select count(*) from t_auth_load +POSTHOOK: query: select count(*) from t_auth_load POSTHOOK: type: QUERY POSTHOOK: Input: default@t_auth_load #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/authorization_owner_actions.q.out b/ql/src/test/results/clientpositive/authorization_owner_actions.q.out index 41d339c..5edce56 100644 --- a/ql/src/test/results/clientpositive/authorization_owner_actions.q.out +++ b/ql/src/test/results/clientpositive/authorization_owner_actions.q.out @@ -1,10 +1,8 @@ -#### A masked pattern was here #### -create table t1(i int) +PREHOOK: query: create table t1(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -#### A masked pattern was here #### -create table t1(i int) +POSTHOOK: query: create table t1(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/authorization_owner_actions_db.q.out b/ql/src/test/results/clientpositive/authorization_owner_actions_db.q.out index 1d2defe..455ec42 100644 --- a/ql/src/test/results/clientpositive/authorization_owner_actions_db.q.out +++ b/ql/src/test/results/clientpositive/authorization_owner_actions_db.q.out @@ -2,11 +2,9 @@ PREHOOK: query: set role admin PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role admin POSTHOOK: type: SHOW_ROLES -#### A masked pattern was here #### -create role testrole +PREHOOK: query: create role testrole PREHOOK: type: CREATEROLE -#### A masked pattern was here #### -create role 
testrole +POSTHOOK: query: create role testrole POSTHOOK: type: CREATEROLE PREHOOK: query: grant role testrole to user hrt_1 PREHOOK: type: GRANT_ROLE @@ -31,14 +29,10 @@ POSTHOOK: query: desc database testdb POSTHOOK: type: DESCDATABASE POSTHOOK: Input: database:testdb testdb location/in/test testrole ROLE -#### A masked pattern was here #### --- create table -use testdb +PREHOOK: query: use testdb PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:testdb -#### A masked pattern was here #### --- create table -use testdb +POSTHOOK: query: use testdb POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:testdb PREHOOK: query: create table foobar (foo string, bar string) @@ -49,14 +43,12 @@ POSTHOOK: query: create table foobar (foo string, bar string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:testdb POSTHOOK: Output: testdb@foobar -PREHOOK: query: -- drop db -drop database testdb cascade +PREHOOK: query: drop database testdb cascade PREHOOK: type: DROPDATABASE PREHOOK: Input: database:testdb PREHOOK: Output: database:testdb PREHOOK: Output: testdb@foobar -POSTHOOK: query: -- drop db -drop database testdb cascade +POSTHOOK: query: drop database testdb cascade POSTHOOK: type: DROPDATABASE POSTHOOK: Input: database:testdb POSTHOOK: Output: database:testdb diff --git a/ql/src/test/results/clientpositive/authorization_parts.q.out b/ql/src/test/results/clientpositive/authorization_parts.q.out index bda6e83..bc600f3 100644 --- a/ql/src/test/results/clientpositive/authorization_parts.q.out +++ b/ql/src/test/results/clientpositive/authorization_parts.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- check add partition without insert privilege -create table tpart(i int, j int) partitioned by (k string) +PREHOOK: query: create table tpart(i int, j int) partitioned by (k string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tpart -POSTHOOK: query: -- check add partition without insert privilege -create table tpart(i int, j 
int) partitioned by (k string) +POSTHOOK: query: create table tpart(i int, j int) partitioned by (k string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tpart diff --git a/ql/src/test/results/clientpositive/authorization_revoke_table_priv.q.out b/ql/src/test/results/clientpositive/authorization_revoke_table_priv.q.out index 187dc63..2803823 100644 --- a/ql/src/test/results/clientpositive/authorization_revoke_table_priv.q.out +++ b/ql/src/test/results/clientpositive/authorization_revoke_table_priv.q.out @@ -1,21 +1,15 @@ -PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) - -CREATE TABLE table_priv_rev(i int) +PREHOOK: query: CREATE TABLE table_priv_rev(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) - -CREATE TABLE table_priv_rev(i int) +POSTHOOK: query: CREATE TABLE table_priv_rev(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@table_priv_rev -PREHOOK: query: -- grant insert privilege to user2 -GRANT INSERT ON table_priv_rev TO USER user2 +PREHOOK: query: GRANT INSERT ON table_priv_rev TO USER user2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- grant insert privilege to user2 -GRANT INSERT ON table_priv_rev TO USER user2 +POSTHOOK: query: GRANT INSERT ON table_priv_rev TO USER user2 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -28,26 +22,20 @@ PREHOOK: type: SHOW_GRANT POSTHOOK: query: SHOW GRANT USER user2 ON ALL POSTHOOK: type: SHOW_GRANT default table_priv_rev user2 USER INSERT false -1 user1 -PREHOOK: query: -- revoke insert privilege from user2 -REVOKE INSERT ON TABLE table_priv_rev FROM USER user2 +PREHOOK: 
query: REVOKE INSERT ON TABLE table_priv_rev FROM USER user2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- revoke insert privilege from user2 -REVOKE INSERT ON TABLE table_priv_rev FROM USER user2 +POSTHOOK: query: REVOKE INSERT ON TABLE table_priv_rev FROM USER user2 POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev PREHOOK: type: SHOW_GRANT POSTHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev POSTHOOK: type: SHOW_GRANT -PREHOOK: query: -- grant all privileges one at a time -- --- grant insert privilege to user2 -GRANT INSERT ON table_priv_rev TO USER user2 +PREHOOK: query: GRANT INSERT ON table_priv_rev TO USER user2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- grant all privileges one at a time -- --- grant insert privilege to user2 -GRANT INSERT ON table_priv_rev TO USER user2 +POSTHOOK: query: GRANT INSERT ON table_priv_rev TO USER user2 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -60,12 +48,10 @@ PREHOOK: type: SHOW_GRANT POSTHOOK: query: SHOW GRANT USER user2 ON ALL POSTHOOK: type: SHOW_GRANT default table_priv_rev user2 USER INSERT false -1 user1 -PREHOOK: query: -- grant select privilege to user2, with grant option -GRANT SELECT ON table_priv_rev TO USER user2 WITH GRANT OPTION +PREHOOK: query: GRANT SELECT ON table_priv_rev TO USER user2 WITH GRANT OPTION PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- grant select privilege to user2, with grant option -GRANT SELECT ON table_priv_rev TO USER user2 WITH GRANT OPTION +POSTHOOK: query: GRANT SELECT ON table_priv_rev TO USER user2 WITH GRANT OPTION POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ 
-74,12 +60,10 @@ POSTHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev POSTHOOK: type: SHOW_GRANT default table_priv_rev user2 USER INSERT false -1 user1 default table_priv_rev user2 USER SELECT true -1 user1 -PREHOOK: query: -- grant update privilege to user2 -GRANT UPDATE ON table_priv_rev TO USER user2 +PREHOOK: query: GRANT UPDATE ON table_priv_rev TO USER user2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- grant update privilege to user2 -GRANT UPDATE ON table_priv_rev TO USER user2 +POSTHOOK: query: GRANT UPDATE ON table_priv_rev TO USER user2 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -89,12 +73,10 @@ POSTHOOK: type: SHOW_GRANT default table_priv_rev user2 USER INSERT false -1 user1 default table_priv_rev user2 USER SELECT true -1 user1 default table_priv_rev user2 USER UPDATE false -1 user1 -PREHOOK: query: -- grant delete privilege to user2 -GRANT DELETE ON table_priv_rev TO USER user2 +PREHOOK: query: GRANT DELETE ON table_priv_rev TO USER user2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- grant delete privilege to user2 -GRANT DELETE ON table_priv_rev TO USER user2 +POSTHOOK: query: GRANT DELETE ON table_priv_rev TO USER user2 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -105,14 +87,10 @@ default table_priv_rev user2 USER DELETE false -1 user1 default table_priv_rev user2 USER INSERT false -1 user1 default table_priv_rev user2 USER SELECT true -1 user1 default table_priv_rev user2 USER UPDATE false -1 user1 -PREHOOK: query: -- start revoking -- --- revoke update privilege from user2 -REVOKE UPDATE ON TABLE table_priv_rev FROM USER user2 +PREHOOK: query: REVOKE UPDATE ON TABLE table_priv_rev FROM USER user2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: 
default@table_priv_rev -POSTHOOK: query: -- start revoking -- --- revoke update privilege from user2 -REVOKE UPDATE ON TABLE table_priv_rev FROM USER user2 +POSTHOOK: query: REVOKE UPDATE ON TABLE table_priv_rev FROM USER user2 POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -129,12 +107,10 @@ POSTHOOK: type: SHOW_GRANT default table_priv_rev user2 USER DELETE false -1 user1 default table_priv_rev user2 USER INSERT false -1 user1 default table_priv_rev user2 USER SELECT true -1 user1 -PREHOOK: query: -- revoke DELETE privilege from user2 -REVOKE DELETE ON TABLE table_priv_rev FROM USER user2 +PREHOOK: query: REVOKE DELETE ON TABLE table_priv_rev FROM USER user2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- revoke DELETE privilege from user2 -REVOKE DELETE ON TABLE table_priv_rev FROM USER user2 +POSTHOOK: query: REVOKE DELETE ON TABLE table_priv_rev FROM USER user2 POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -143,12 +119,10 @@ POSTHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev POSTHOOK: type: SHOW_GRANT default table_priv_rev user2 USER INSERT false -1 user1 default table_priv_rev user2 USER SELECT true -1 user1 -PREHOOK: query: -- revoke insert privilege from user2 -REVOKE INSERT ON TABLE table_priv_rev FROM USER user2 +PREHOOK: query: REVOKE INSERT ON TABLE table_priv_rev FROM USER user2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- revoke insert privilege from user2 -REVOKE INSERT ON TABLE table_priv_rev FROM USER user2 +POSTHOOK: query: REVOKE INSERT ON TABLE table_priv_rev FROM USER user2 POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -156,12 +130,10 @@ PREHOOK: type: SHOW_GRANT 
POSTHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev POSTHOOK: type: SHOW_GRANT default table_priv_rev user2 USER SELECT true -1 user1 -PREHOOK: query: -- revoke grant option for select privilege from user2 -REVOKE GRANT OPTION FOR SELECT ON TABLE table_priv_rev FROM USER user2 +PREHOOK: query: REVOKE GRANT OPTION FOR SELECT ON TABLE table_priv_rev FROM USER user2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- revoke grant option for select privilege from user2 -REVOKE GRANT OPTION FOR SELECT ON TABLE table_priv_rev FROM USER user2 +POSTHOOK: query: REVOKE GRANT OPTION FOR SELECT ON TABLE table_priv_rev FROM USER user2 POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -169,12 +141,10 @@ PREHOOK: type: SHOW_GRANT POSTHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev POSTHOOK: type: SHOW_GRANT default table_priv_rev user2 USER SELECT false -1 user1 -PREHOOK: query: -- revoke select privilege from user2 -REVOKE SELECT ON TABLE table_priv_rev FROM USER user2 +PREHOOK: query: REVOKE SELECT ON TABLE table_priv_rev FROM USER user2 PREHOOK: type: REVOKE_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- revoke select privilege from user2 -REVOKE SELECT ON TABLE table_priv_rev FROM USER user2 +POSTHOOK: query: REVOKE SELECT ON TABLE table_priv_rev FROM USER user2 POSTHOOK: type: REVOKE_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev @@ -185,12 +155,10 @@ PREHOOK: query: SHOW GRANT USER user2 ON ALL PREHOOK: type: SHOW_GRANT POSTHOOK: query: SHOW GRANT USER user2 ON ALL POSTHOOK: type: SHOW_GRANT -PREHOOK: query: -- grant all followed by revoke all -GRANT ALL ON table_priv_rev TO USER user2 +PREHOOK: query: GRANT ALL ON table_priv_rev TO USER user2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@table_priv_rev -POSTHOOK: query: -- 
grant all followed by revoke all -GRANT ALL ON table_priv_rev TO USER user2 +POSTHOOK: query: GRANT ALL ON table_priv_rev TO USER user2 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@table_priv_rev PREHOOK: query: SHOW GRANT USER user2 ON TABLE table_priv_rev diff --git a/ql/src/test/results/clientpositive/authorization_role_grant1.q.out b/ql/src/test/results/clientpositive/authorization_role_grant1.q.out index 9cd3f99..0f8bb83 100644 --- a/ql/src/test/results/clientpositive/authorization_role_grant1.q.out +++ b/ql/src/test/results/clientpositive/authorization_role_grant1.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- enable sql standard authorization --- role granting without role keyword --- also test role being treated as case insensitive -set role ADMIN +PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES -POSTHOOK: query: -- enable sql standard authorization --- role granting without role keyword --- also test role being treated as case insensitive -set role ADMIN +POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES PREHOOK: query: create role src_Role2 PREHOOK: type: CREATEROLE @@ -29,11 +23,9 @@ POSTHOOK: type: SHOW_ROLES admin public src_role2 -PREHOOK: query: -- revoke role without role keyword -revoke src_rolE2 from user user2 +PREHOOK: query: revoke src_rolE2 from user user2 PREHOOK: type: REVOKE_ROLE -POSTHOOK: query: -- revoke role without role keyword -revoke src_rolE2 from user user2 +POSTHOOK: query: revoke src_rolE2 from user user2 POSTHOOK: type: REVOKE_ROLE PREHOOK: query: show role grant user user2 PREHOOK: type: SHOW_ROLE_GRANT @@ -47,17 +39,9 @@ POSTHOOK: type: SHOW_ROLES admin public src_role2 -PREHOOK: query: ---------------------------------------- --- role granting without role keyword, with admin option (syntax check) ----------------------------------------- - -create role src_role_wadmin +PREHOOK: query: create role src_role_wadmin PREHOOK: type: CREATEROLE -POSTHOOK: query: ---------------------------------------- 
--- role granting without role keyword, with admin option (syntax check) ----------------------------------------- - -create role src_role_wadmin +POSTHOOK: query: create role src_role_wadmin POSTHOOK: type: CREATEROLE PREHOOK: query: grant src_role_wadmin to user user2 with admin option PREHOOK: type: GRANT_ROLE @@ -69,11 +53,9 @@ POSTHOOK: query: show role grant user user2 POSTHOOK: type: SHOW_ROLE_GRANT public false -1 src_role_wadmin true -1 hive_admin_user -PREHOOK: query: -- revoke admin option -revoke admin option for src_role_wadmin from user user2 +PREHOOK: query: revoke admin option for src_role_wadmin from user user2 PREHOOK: type: REVOKE_ROLE -POSTHOOK: query: -- revoke admin option -revoke admin option for src_role_wadmin from user user2 +POSTHOOK: query: revoke admin option for src_role_wadmin from user user2 POSTHOOK: type: REVOKE_ROLE PREHOOK: query: show role grant user user2 PREHOOK: type: SHOW_ROLE_GRANT @@ -81,22 +63,18 @@ POSTHOOK: query: show role grant user user2 POSTHOOK: type: SHOW_ROLE_GRANT public false -1 src_role_wadmin false -1 hive_admin_user -PREHOOK: query: -- revoke role without role keyword -revoke src_role_wadmin from user user2 +PREHOOK: query: revoke src_role_wadmin from user user2 PREHOOK: type: REVOKE_ROLE -POSTHOOK: query: -- revoke role without role keyword -revoke src_role_wadmin from user user2 +POSTHOOK: query: revoke src_role_wadmin from user user2 POSTHOOK: type: REVOKE_ROLE PREHOOK: query: show role grant user user2 PREHOOK: type: SHOW_ROLE_GRANT POSTHOOK: query: show role grant user user2 POSTHOOK: type: SHOW_ROLE_GRANT public false -1 -PREHOOK: query: -- drop roles -show roles +PREHOOK: query: show roles PREHOOK: type: SHOW_ROLES -POSTHOOK: query: -- drop roles -show roles +POSTHOOK: query: show roles POSTHOOK: type: SHOW_ROLES admin public diff --git a/ql/src/test/results/clientpositive/authorization_role_grant2.q.out b/ql/src/test/results/clientpositive/authorization_role_grant2.q.out index 3db507e..9196ebc 100644 
--- a/ql/src/test/results/clientpositive/authorization_role_grant2.q.out +++ b/ql/src/test/results/clientpositive/authorization_role_grant2.q.out @@ -2,19 +2,9 @@ PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role ADMIN POSTHOOK: type: SHOW_ROLES -PREHOOK: query: ---------------------------------------- --- role granting with admin option ----------------------------------------- --- Also test case sensitivity of role name - -create role srC_role_wadmin +PREHOOK: query: create role srC_role_wadmin PREHOOK: type: CREATEROLE -POSTHOOK: query: ---------------------------------------- --- role granting with admin option ----------------------------------------- --- Also test case sensitivity of role name - -create role srC_role_wadmin +POSTHOOK: query: create role srC_role_wadmin POSTHOOK: type: CREATEROLE PREHOOK: query: create role src_roLe2 PREHOOK: type: CREATEROLE @@ -47,11 +37,9 @@ POSTHOOK: query: show principals src_role_wadmin POSTHOOK: type: SHOW_ROLE_PRINCIPALS principal_name principal_type grant_option grantor grantor_type grant_time user2 USER true hive_admin_user USER -1 -PREHOOK: query: -- grant role to another user -grant src_Role_wadmin to user user3 +PREHOOK: query: grant src_Role_wadmin to user user3 PREHOOK: type: GRANT_ROLE -POSTHOOK: query: -- grant role to another user -grant src_Role_wadmin to user user3 +POSTHOOK: query: grant src_Role_wadmin to user user3 POSTHOOK: type: GRANT_ROLE PREHOOK: query: show role grant user user3 PREHOOK: type: SHOW_ROLE_GRANT @@ -60,11 +48,9 @@ POSTHOOK: type: SHOW_ROLE_GRANT role grant_option grant_time grantor public false -1 src_role_wadmin false -1 user2 -PREHOOK: query: -- grant role to another role -grant src_role_wadmin to role sRc_role2 +PREHOOK: query: grant src_role_wadmin to role sRc_role2 PREHOOK: type: GRANT_ROLE -POSTHOOK: query: -- grant role to another role -grant src_role_wadmin to role sRc_role2 +POSTHOOK: query: grant src_role_wadmin to role sRc_role2 POSTHOOK: 
type: GRANT_ROLE PREHOOK: query: set role ADMIn PREHOOK: type: SHOW_ROLES @@ -74,11 +60,9 @@ PREHOOK: query: grant src_role2 to user user3 PREHOOK: type: GRANT_ROLE POSTHOOK: query: grant src_role2 to user user3 POSTHOOK: type: GRANT_ROLE -PREHOOK: query: -- as user3 belings to src_role2 hierarchy, its should be able to run show grant on it -show role grant role src_Role2 +PREHOOK: query: show role grant role src_Role2 PREHOOK: type: SHOW_ROLE_GRANT -POSTHOOK: query: -- as user3 belings to src_role2 hierarchy, its should be able to run show grant on it -show role grant role src_Role2 +POSTHOOK: query: show role grant role src_Role2 POSTHOOK: type: SHOW_ROLE_GRANT role grant_option grant_time grantor src_role_wadmin false -1 user2 @@ -98,11 +82,9 @@ PREHOOK: query: set role src_role_wadmin PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role src_role_wadmin POSTHOOK: type: SHOW_ROLES -PREHOOK: query: -- revoke user from role -revoke src_rolE_wadmin from user user3 +PREHOOK: query: revoke src_rolE_wadmin from user user3 PREHOOK: type: REVOKE_ROLE -POSTHOOK: query: -- revoke user from role -revoke src_rolE_wadmin from user user3 +POSTHOOK: query: revoke src_rolE_wadmin from user user3 POSTHOOK: type: REVOKE_ROLE PREHOOK: query: show role grant user user3 PREHOOK: type: SHOW_ROLE_GRANT @@ -111,11 +93,9 @@ POSTHOOK: type: SHOW_ROLE_GRANT role grant_option grant_time grantor public false -1 src_role2 false -1 hive_admin_user -PREHOOK: query: -- revoke role from role -revoke src_rolE_wadmin from role sRc_role2 +PREHOOK: query: revoke src_rolE_wadmin from role sRc_role2 PREHOOK: type: REVOKE_ROLE -POSTHOOK: query: -- revoke role from role -revoke src_rolE_wadmin from role sRc_role2 +POSTHOOK: query: revoke src_rolE_wadmin from role sRc_role2 POSTHOOK: type: REVOKE_ROLE PREHOOK: query: set role ADMIN PREHOOK: type: SHOW_ROLES diff --git a/ql/src/test/results/clientpositive/authorization_show_grant.q.out b/ql/src/test/results/clientpositive/authorization_show_grant.q.out 
index 7fa0b1c..d0fed81 100644 --- a/ql/src/test/results/clientpositive/authorization_show_grant.q.out +++ b/ql/src/test/results/clientpositive/authorization_show_grant.q.out @@ -2,13 +2,9 @@ PREHOOK: query: set role admin PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role admin POSTHOOK: type: SHOW_ROLES -PREHOOK: query: -- test show grant authorization - -create role roleA +PREHOOK: query: create role roleA PREHOOK: type: CREATEROLE -POSTHOOK: query: -- test show grant authorization - -create role roleA +POSTHOOK: query: create role roleA POSTHOOK: type: CREATEROLE PREHOOK: query: create role roleB PREHOOK: type: CREATEROLE @@ -22,13 +18,11 @@ PREHOOK: query: grant role roleB to role roleA PREHOOK: type: GRANT_ROLE POSTHOOK: query: grant role roleB to role roleA POSTHOOK: type: GRANT_ROLE -PREHOOK: query: -- create table and grant privileges to a role -create table t1(i int, j int, k int) +PREHOOK: query: create table t1(i int, j int, k int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- create table and grant privileges to a role -create table t1(i int, j int, k int) +POSTHOOK: query: create table t1(i int, j int, k int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -74,11 +68,9 @@ PREHOOK: query: set role admin PREHOOK: type: SHOW_ROLES POSTHOOK: query: set role admin POSTHOOK: type: SHOW_ROLES -PREHOOK: query: -- as user in admin role, it should be possible to see other users grant -show grant user user1 on table t1 +PREHOOK: query: show grant user user1 on table t1 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: -- as user in admin role, it should be possible to see other users grant -show grant user user1 on table t1 +POSTHOOK: query: show grant user user1 on table t1 POSTHOOK: type: SHOW_GRANT default t1 user1 USER DELETE true -1 hive_admin_user default t1 user1 USER INSERT true -1 hive_admin_user @@ -186,11 +178,9 @@ default t2 user1 USER SELECT true 
-1 hive_admin_user default t2 user1 USER UPDATE true -1 hive_admin_user default t2 userA USER INSERT false -1 user1 default t2 userA USER SELECT false -1 user1 -PREHOOK: query: -- user belonging to role should be able to see it -show grant role roleA on table t1 +PREHOOK: query: show grant role roleA on table t1 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: -- user belonging to role should be able to see it -show grant role roleA on table t1 +POSTHOOK: query: show grant role roleA on table t1 POSTHOOK: type: SHOW_GRANT default t1 rolea ROLE SELECT false -1 user1 PREHOOK: query: show grant role roleA diff --git a/ql/src/test/results/clientpositive/authorization_update.q.out b/ql/src/test/results/clientpositive/authorization_update.q.out index dc67ae3..e6f456f 100644 --- a/ql/src/test/results/clientpositive/authorization_update.q.out +++ b/ql/src/test/results/clientpositive/authorization_update.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) - -CREATE TABLE t_auth_up(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: CREATE TABLE t_auth_up(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t_auth_up -POSTHOOK: query: -- current user has been set (comment line before the set cmd is resulting in parse error!!) 
- -CREATE TABLE t_auth_up(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: CREATE TABLE t_auth_up(i int, j int) clustered by (i) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_auth_up @@ -24,12 +20,10 @@ PREHOOK: Output: default@t_select POSTHOOK: query: GRANT ALL ON TABLE t_select TO ROLE public POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t_select -PREHOOK: query: -- grant update privilege to another user -GRANT UPDATE ON t_auth_up TO USER userWIns +PREHOOK: query: GRANT UPDATE ON t_auth_up TO USER userWIns PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@t_auth_up -POSTHOOK: query: -- grant update privilege to another user -GRANT UPDATE ON t_auth_up TO USER userWIns +POSTHOOK: query: GRANT UPDATE ON t_auth_up TO USER userWIns POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@t_auth_up PREHOOK: query: GRANT SELECT ON t_auth_up TO USER userWIns diff --git a/ql/src/test/results/clientpositive/authorization_view_1.q.out b/ql/src/test/results/clientpositive/authorization_view_1.q.out index 01ce416..89adf19 100644 --- a/ql/src/test/results/clientpositive/authorization_view_1.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_1.q.out @@ -40,14 +40,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +PREHOOK: query: grant select on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: 
Output: default@src_autho_test PREHOOK: query: grant select on table v to user hive_test_user @@ -140,14 +136,10 @@ PREHOOK: query: show grant user hive_test_user on v(key) PREHOOK: type: SHOW_GRANT POSTHOOK: query: show grant user hive_test_user on v(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: --column grant to user - -grant select on table src_autho_test to user hive_test_user +PREHOOK: query: grant select on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --column grant to user - -grant select on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant select(key) on table v to user hive_test_user diff --git a/ql/src/test/results/clientpositive/authorization_view_2.q.out b/ql/src/test/results/clientpositive/authorization_view_2.q.out index 0b61663..9f909be 100644 --- a/ql/src/test/results/clientpositive/authorization_view_2.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_2.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src_autho_test POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 PREHOOK: query: select * from v2 order by key limit 10 diff --git a/ql/src/test/results/clientpositive/authorization_view_3.q.out b/ql/src/test/results/clientpositive/authorization_view_3.q.out index cbc40b5..c9334e3 100644 --- 
a/ql/src/test/results/clientpositive/authorization_view_3.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_3.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src_autho_test POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user diff --git a/ql/src/test/results/clientpositive/authorization_view_4.q.out b/ql/src/test/results/clientpositive/authorization_view_4.q.out index f859923..68fdfb6 100644 --- a/ql/src/test/results/clientpositive/authorization_view_4.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_4.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out index 8fe92dd..8395782 100644 --- 
a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_1.q.out @@ -40,14 +40,10 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src_autho_test POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +PREHOOK: query: grant select on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --table grant to user - -grant select on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant select on table v to user hive_test_user @@ -140,14 +136,10 @@ PREHOOK: query: show grant user hive_test_user on v(key) PREHOOK: type: SHOW_GRANT POSTHOOK: query: show grant user hive_test_user on v(key) POSTHOOK: type: SHOW_GRANT -PREHOOK: query: --column grant to user - -grant select on table src_autho_test to user hive_test_user +PREHOOK: query: grant select on table src_autho_test to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@src_autho_test -POSTHOOK: query: --column grant to user - -grant select on table src_autho_test to user hive_test_user +POSTHOOK: query: grant select on table src_autho_test to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@src_autho_test PREHOOK: query: grant select(key) on table v to user hive_test_user @@ -261,16 +253,12 @@ val_11 val_111 val_113 val_114 -PREHOOK: query: --although cbo is enabled, it will not succeed. 
- -select key from v sort by key limit 10 +PREHOOK: query: select key from v sort by key limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@src_autho_test PREHOOK: Input: default@v #### A masked pattern was here #### -POSTHOOK: query: --although cbo is enabled, it will not succeed. - -select key from v sort by key limit 10 +POSTHOOK: query: select key from v sort by key limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src_autho_test POSTHOOK: Input: default@v diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out index 0b61663..9f909be 100644 --- a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_2.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src_autho_test POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 PREHOOK: query: select * from v2 order by key limit 10 diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out index cbc40b5..c9334e3 100644 --- a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_3.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src_autho_test POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table 
grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user diff --git a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out index f859923..68fdfb6 100644 --- a/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out +++ b/ql/src/test/results/clientpositive/authorization_view_disable_cbo_4.q.out @@ -32,14 +32,10 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@v1 POSTHOOK: Output: database:default POSTHOOK: Output: default@v2 -PREHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +PREHOOK: query: grant select on table v2 to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@v2 -POSTHOOK: query: --table grant to user - -grant select on table v2 to user hive_test_user +POSTHOOK: query: grant select on table v2 to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@v2 PREHOOK: query: grant select(key) on table src_autho_test to user hive_test_user diff --git a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out index 07c64db..e19fb5f 100644 --- a/ql/src/test/results/clientpositive/autoColumnStats_5.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_5.q.out @@ -1,22 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- --- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned --- --- --- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... 
STATIC INSERT ---- -CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partitioned1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- --- FILE VARIATION: TEXT, Non-Vectorized, MapWork, Partitioned --- --- --- SECTION VARIATION: ALTER TABLE ADD COLUMNS ... STATIC INSERT ---- -CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE partitioned1(a INT, b STRING) PARTITIONED BY(part INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partitioned1 @@ -210,13 +196,11 @@ col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_ # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment a int 1 4 0 5 from deserializer -PREHOOK: query: -- Table-Non-Cascade ADD COLUMNS ... -alter table partitioned1 add columns(c int, d string) +PREHOOK: query: alter table partitioned1 add columns(c int, d string) PREHOOK: type: ALTERTABLE_ADDCOLS PREHOOK: Input: default@partitioned1 PREHOOK: Output: default@partitioned1 -POSTHOOK: query: -- Table-Non-Cascade ADD COLUMNS ... 
-alter table partitioned1 add columns(c int, d string) +POSTHOOK: query: alter table partitioned1 add columns(c int, d string) POSTHOOK: type: ALTERTABLE_ADDCOLS POSTHOOK: Input: default@partitioned1 POSTHOOK: Output: default@partitioned1 diff --git a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out index 2b3bbce..82768d1 100644 --- a/ql/src/test/results/clientpositive/autoColumnStats_7.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_7.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Taken from groupby2.q -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- Taken from groupby2.q -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git a/ql/src/test/results/clientpositive/autoColumnStats_8.q.out b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out index 5d9e5ab..3989c4b 100644 --- a/ql/src/test/results/clientpositive/autoColumnStats_8.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_8.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/autoColumnStats_9.q.out b/ql/src/test/results/clientpositive/autoColumnStats_9.q.out index c7582fc..a90d180 100644 --- 
a/ql/src/test/results/clientpositive/autoColumnStats_9.q.out +++ b/ql/src/test/results/clientpositive/autoColumnStats_9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/auto_join0.q.out b/ql/src/test/results/clientpositive/auto_join0.q.out index 17c8dde..77940b3 100644 --- a/ql/src/test/results/clientpositive/auto_join0.q.out +++ b/ql/src/test/results/clientpositive/auto_join0.q.out @@ -1,9 +1,7 @@ Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-7:MAPRED' is a cross product Warning: Map Join MAPJOIN[32][bigTable=?] 
in task 'Stage-6:MAPRED' is a cross product Warning: Shuffle Join JOIN[12][tables = [src1, src2]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select sum(hash(a.k1,a.v1,a.k2, a.v2)) from ( SELECT src1.key as k1, src1.value as v1, @@ -14,9 +12,7 @@ SELECT src1.key as k1, src1.value as v1, SORT BY k1, v1, k2, v2 ) a PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select sum(hash(a.k1,a.v1,a.k2, a.v2)) from ( SELECT src1.key as k1, src1.value as v1, diff --git a/ql/src/test/results/clientpositive/auto_join1.q.out b/ql/src/test/results/clientpositive/auto_join1.q.out index 1ce2663..5f4bb74 100644 --- a/ql/src/test/results/clientpositive/auto_join1.q.out +++ b/ql/src/test/results/clientpositive/auto_join1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/auto_join14.q.out b/ql/src/test/results/clientpositive/auto_join14.q.out index 4a0f2f9..1dd677c 100644 --- a/ql/src/test/results/clientpositive/auto_join14.q.out +++ b/ql/src/test/results/clientpositive/auto_join14.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 
-POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/auto_join21.q.out b/ql/src/test/results/clientpositive/auto_join21.q.out index b1d03e4..9c26055 100644 --- a/ql/src/test/results/clientpositive/auto_join21.q.out +++ b/ql/src/test/results/clientpositive/auto_join21.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/auto_join23.q.out b/ql/src/test/results/clientpositive/auto_join23.q.out index c0184cc..7128ce5 100644 --- a/ql/src/test/results/clientpositive/auto_join23.q.out +++ b/ql/src/test/results/clientpositive/auto_join23.q.out @@ -1,12 +1,8 @@ Warning: Map Join MAPJOIN[14][bigTable=?] 
in task 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/auto_join25.q.out b/ql/src/test/results/clientpositive/auto_join25.q.out index 08cbe42..534bdb6 100644 --- a/ql/src/test/results/clientpositive/auto_join25.q.out +++ b/ql/src/test/results/clientpositive/auto_join25.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- This test tests the scenario when the mapper dies. So, create a conditional task for the mapjoin -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- This test tests the scenario when the mapper dies. 
So, create a conditional task for the mapjoin -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/auto_join26.q.out b/ql/src/test/results/clientpositive/auto_join26.q.out index c190621..b05145d 100644 --- a/ql/src/test/results/clientpositive/auto_join26.q.out +++ b/ql/src/test/results/clientpositive/auto_join26.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, cnt INT) +PREHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, cnt INT) +POSTHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/auto_join29.q.out b/ql/src/test/results/clientpositive/auto_join29.q.out index a4a2e9f..147da9f 100644 --- a/ql/src/test/results/clientpositive/auto_join29.q.out +++ b/ql/src/test/results/clientpositive/auto_join29.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value 
POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/auto_join32.q.out b/ql/src/test/results/clientpositive/auto_join32.q.out index 2ebec97..4a233d4 100644 --- a/ql/src/test/results/clientpositive/auto_join32.q.out +++ b/ql/src/test/results/clientpositive/auto_join32.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- empty tables -create table studenttab10k (name string, age int, gpa double) +PREHOOK: query: create table studenttab10k (name string, age int, gpa double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@studenttab10k -POSTHOOK: query: -- empty tables -create table studenttab10k (name string, age int, gpa double) +POSTHOOK: query: create table studenttab10k (name string, age int, gpa double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@studenttab10k @@ -126,13 +124,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@studenttab10k POSTHOOK: Input: default@votertab10k #### A masked pattern was here #### -PREHOOK: query: -- smb -create table studenttab10k_smb (name string, age int, gpa double) clustered by (name) sorted by (name) into 2 buckets +PREHOOK: query: create table studenttab10k_smb (name string, age int, gpa double) clustered by (name) sorted by (name) into 2 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@studenttab10k_smb -POSTHOOK: query: -- smb -create table studenttab10k_smb (name string, age int, gpa double) clustered by (name) sorted by (name) into 2 buckets +POSTHOOK: query: create table studenttab10k_smb (name string, age int, gpa double) clustered by (name) sorted by (name) into 2 buckets POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@studenttab10k_smb @@ -334,13 +330,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@studenttab10k_smb POSTHOOK: Input: default@votertab10k_smb #### A masked pattern was here #### -PREHOOK: query: -- smb + 
partitions -create table studenttab10k_part (name string, age int, gpa double) partitioned by (p string) clustered by (name) sorted by (name) into 2 buckets +PREHOOK: query: create table studenttab10k_part (name string, age int, gpa double) partitioned by (p string) clustered by (name) sorted by (name) into 2 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@studenttab10k_part -POSTHOOK: query: -- smb + partitions -create table studenttab10k_part (name string, age int, gpa double) partitioned by (p string) clustered by (name) sorted by (name) into 2 buckets +POSTHOOK: query: create table studenttab10k_part (name string, age int, gpa double) partitioned by (p string) clustered by (name) sorted by (name) into 2 buckets POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@studenttab10k_part diff --git a/ql/src/test/results/clientpositive/auto_join33.q.out b/ql/src/test/results/clientpositive/auto_join33.q.out index 5a8bf8c..a347fcd 100644 --- a/ql/src/test/results/clientpositive/auto_join33.q.out +++ b/ql/src/test/results/clientpositive/auto_join33.q.out @@ -1,15 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM (SELECT * FROM src WHERE key+1 < 10) a JOIN (SELECT * FROM src WHERE key+2 < 10) b ON a.key+1=b.key+2 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM (SELECT * FROM src WHERE key+1 < 10) a JOIN diff --git a/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out b/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out index b245706..214c1df 100644 --- a/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out +++ b/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- HIVE-5056 RS has expression list for values, but it's ignored in MapJoinProcessor - -create table testsrc ( `key` 
int,`val` string) +PREHOOK: query: create table testsrc ( `key` int,`val` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@testsrc -POSTHOOK: query: -- HIVE-5056 RS has expression list for values, but it's ignored in MapJoinProcessor - -create table testsrc ( `key` int,`val` string) +POSTHOOK: query: create table testsrc ( `key` int,`val` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@testsrc diff --git a/ql/src/test/results/clientpositive/auto_join_stats.q.out b/ql/src/test/results/clientpositive/auto_join_stats.q.out index 675179e..e80af96 100644 --- a/ql/src/test/results/clientpositive/auto_join_stats.q.out +++ b/ql/src/test/results/clientpositive/auto_join_stats.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Setting HTS(src2) < threshold < HTS(src2) + HTS(smalltable). --- This query plan should thus not try to combine the mapjoin into a single work. - -create table smalltable(key string, value string) stored as textfile +PREHOOK: query: create table smalltable(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@smalltable -POSTHOOK: query: -- Setting HTS(src2) < threshold < HTS(src2) + HTS(smalltable). --- This query plan should thus not try to combine the mapjoin into a single work. 
- -create table smalltable(key string, value string) stored as textfile +POSTHOOK: query: create table smalltable(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@smalltable diff --git a/ql/src/test/results/clientpositive/auto_join_stats2.q.out b/ql/src/test/results/clientpositive/auto_join_stats2.q.out index 11d7c56..6ea5afa 100644 --- a/ql/src/test/results/clientpositive/auto_join_stats2.q.out +++ b/ql/src/test/results/clientpositive/auto_join_stats2.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Auto_join2 no longer tests merging the mapjoin work if big-table selection is based on stats, as src3 is smaller statistically than src1 + src2. --- Hence forcing the third table to be smaller. - -create table smalltable(key string, value string) stored as textfile +PREHOOK: query: create table smalltable(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@smalltable -POSTHOOK: query: -- Auto_join2 no longer tests merging the mapjoin work if big-table selection is based on stats, as src3 is smaller statistically than src1 + src2. --- Hence forcing the third table to be smaller. 
- -create table smalltable(key string, value string) stored as textfile +POSTHOOK: query: create table smalltable(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@smalltable diff --git a/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out b/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out index aa6c5a9..17a912e 100644 --- a/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out +++ b/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out @@ -1041,8 +1041,7 @@ RUN: Stage-3:MAPRED 119 val_119 119 val_119 119 val_119 -PREHOOK: query: -- fallback to common join -select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 order by a.key, a.value limit 40 +PREHOOK: query: select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 order by a.key, a.value limit 40 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### @@ -1050,8 +1049,7 @@ FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.Ma ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask -POSTHOOK: query: -- fallback to common join -select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 order by a.key, a.value limit 40 +POSTHOOK: query: select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 order by a.key, a.value limit 40 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out index b1d2b23..6e4d112 100644 --- 
a/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_1.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -103,11 +99,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out 
b/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out index 2e99e9c..22ac2a2 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_10.q.out @@ -38,8 +38,7 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -51,8 +50,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -217,16 +215,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 40 -PREHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. 
-explain +POSTHOOK: query: explain select count(*) from (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 join diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out index 82a8e93..b216c60 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_11.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -417,17 +413,9 @@ POSTHOOK: Input: default@bucket_small POSTHOOK: Input: default@bucket_small@ds=2008-04-08 #### A masked pattern was here #### 38 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter --- The tables are only bucketed and not sorted, the join should not be converted --- Currenly, a join is only converted to a sort-merge join without a hint, automatic conversion to --- bucketized mapjoin is not done -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended 
select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter --- The tables are only bucketed and not sorted, the join should not be converted --- Currenly, a join is only converted to a sort-merge join without a hint, automatic conversion to --- bucketized mapjoin is not done -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage @@ -741,11 +729,9 @@ POSTHOOK: Input: default@bucket_small POSTHOOK: Input: default@bucket_small@ds=2008-04-08 #### A masked pattern was here #### 38 -PREHOOK: query: -- The join is converted to a bucketed mapjoin with a mapjoin hint -explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is converted to a bucketed mapjoin with a mapjoin hint -explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -1012,11 +998,9 @@ POSTHOOK: Input: default@bucket_small POSTHOOK: Input: default@bucket_small@ds=2008-04-08 #### A masked pattern was here #### 38 -PREHOOK: query: -- HIVE-7023 -explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key +PREHOOK: query: explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON 
a.key = b.key JOIN bucket_big c ON a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-7023 -explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key +POSTHOOK: query: explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out index 0a4ffee..99f0206 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_12.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out index b45411c..504f2da 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, 
value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 @@ -54,8 +50,7 @@ POSTHOOK: query: CREATE TABLE dest2(k1 string, k2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- A SMB join followed by a mutli-insert -explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -63,8 +58,7 @@ from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -232,8 +226,7 @@ val_5 val_5 val_5 val_5 val_8 val_8 val_9 val_9 -PREHOOK: query: -- A SMB join followed by a mutli-insert -explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -241,8 +234,7 @@ from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -410,8 +402,7 @@ val_5 val_5 val_5 val_5 val_8 val_8 val_9 val_9 -PREHOOK: query: -- A SMB join followed by a mutli-insert 
-explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -419,8 +410,7 @@ from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_14.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_14.q.out index ec0e6b7..cc4cbfb 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_14.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_14.q.out @@ -34,12 +34,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed -explain +PREHOOK: query: explain select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed -explain +POSTHOOK: query: explain select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -172,12 +170,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed -explain +PREHOOK: query: explain select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON 
a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed -explain +POSTHOOK: query: explain select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out index 1d59a0d..9151371 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -83,11 +81,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: 
query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -278,11 +274,9 @@ POSTHOOK: Input: default@bucket_small POSTHOOK: Input: default@bucket_small@ds=2008-04-08 #### A masked pattern was here #### 38 -PREHOOK: query: -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage , consists of Stage-6, Stage-7, Stage-1 diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out index 026fde7..42e0bad 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_3.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 
BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -83,11 +81,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out index 8037ff5..3beea70 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_4.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by 
(ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -99,11 +97,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out index e2deba4..ea0d25b 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out 
+++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_5.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket - --- SORT_QUERY_RESULTS - -CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket - --- SORT_QUERY_RESULTS - -CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -70,11 +62,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out 
b/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out index d7a053f..8f6ea4b 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_7.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -116,11 +114,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain 
extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/autogen_colalias.q.out b/ql/src/test/results/clientpositive/autogen_colalias.q.out index ec049a7..ff6a05f 100644 --- a/ql/src/test/results/clientpositive/autogen_colalias.q.out +++ b/ql/src/test/results/clientpositive/autogen_colalias.q.out @@ -124,11 +124,9 @@ POSTHOOK: query: describe dest_grouped_new2 POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@dest_grouped_new2 key string -PREHOOK: query: -- Drop the temporary function at the end till HIVE-3160 gets fixed -DROP TEMPORARY FUNCTION test_max +PREHOOK: query: DROP TEMPORARY FUNCTION test_max PREHOOK: type: DROPFUNCTION PREHOOK: Output: test_max -POSTHOOK: query: -- Drop the temporary function at the end till HIVE-3160 gets fixed -DROP TEMPORARY FUNCTION test_max +POSTHOOK: query: DROP TEMPORARY FUNCTION test_max POSTHOOK: type: DROPFUNCTION POSTHOOK: Output: test_max diff --git a/ql/src/test/results/clientpositive/avro_add_column.q.out b/ql/src/test/results/clientpositive/avro_add_column.q.out index 5e44842..57c7d80 100644 --- a/ql/src/test/results/clientpositive/avro_add_column.q.out +++ b/ql/src/test/results/clientpositive/avro_add_column.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors ( +PREHOOK: query: CREATE TABLE doctors ( number int, first_name string) STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors ( +POSTHOOK: query: CREATE TABLE doctors ( number int, first_name string) STORED AS AVRO diff --git a/ql/src/test/results/clientpositive/avro_add_column2.q.out b/ql/src/test/results/clientpositive/avro_add_column2.q.out index 630ae22..599f93e 100644 --- 
a/ql/src/test/results/clientpositive/avro_add_column2.q.out +++ b/ql/src/test/results/clientpositive/avro_add_column2.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors ( +PREHOOK: query: CREATE TABLE doctors ( number int, first_name string, last_name string) @@ -9,10 +6,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors ( +POSTHOOK: query: CREATE TABLE doctors ( number int, first_name string, last_name string) diff --git a/ql/src/test/results/clientpositive/avro_add_column3.q.out b/ql/src/test/results/clientpositive/avro_add_column3.q.out index ced2d5c..2c8bd20 100644 --- a/ql/src/test/results/clientpositive/avro_add_column3.q.out +++ b/ql/src/test/results/clientpositive/avro_add_column3.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors ( +PREHOOK: query: CREATE TABLE doctors ( number int, first_name string, last_name string) @@ -9,10 +6,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors ( +POSTHOOK: query: CREATE TABLE doctors ( number int, first_name string, last_name string) diff --git a/ql/src/test/results/clientpositive/avro_change_schema.q.out b/ql/src/test/results/clientpositive/avro_change_schema.q.out index 8c302d1..a3528ab 100644 --- a/ql/src/test/results/clientpositive/avro_change_schema.q.out +++ b/ql/src/test/results/clientpositive/avro_change_schema.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- verify that we can update the table properties -CREATE TABLE avro2 +PREHOOK: query: CREATE TABLE avro2 ROW FORMAT SERDE 
'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -15,8 +14,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@avro2 -POSTHOOK: query: -- verify that we can update the table properties -CREATE TABLE avro2 +POSTHOOK: query: CREATE TABLE avro2 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS diff --git a/ql/src/test/results/clientpositive/avro_comments.q.out b/ql/src/test/results/clientpositive/avro_comments.q.out index aab0e6a..fbd4727 100644 --- a/ql/src/test/results/clientpositive/avro_comments.q.out +++ b/ql/src/test/results/clientpositive/avro_comments.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- verify Avro columns comments -DROP TABLE IF EXISTS testAvroComments1 +PREHOOK: query: DROP TABLE IF EXISTS testAvroComments1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- verify Avro columns comments -DROP TABLE IF EXISTS testAvroComments1 +POSTHOOK: query: DROP TABLE IF EXISTS testAvroComments1 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE testAvroComments1 ROW FORMAT diff --git a/ql/src/test/results/clientpositive/avro_compression_enabled.q.out b/ql/src/test/results/clientpositive/avro_compression_enabled.q.out index 8820107..d0122bf 100644 --- a/ql/src/test/results/clientpositive/avro_compression_enabled.q.out +++ b/ql/src/test/results/clientpositive/avro_compression_enabled.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 +PREHOOK: query: CREATE TABLE doctors4 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -37,9 +35,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors4 -POSTHOOK: query: -- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 +POSTHOOK: query: 
CREATE TABLE doctors4 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS diff --git a/ql/src/test/results/clientpositive/avro_compression_enabled_native.q.out b/ql/src/test/results/clientpositive/avro_compression_enabled_native.q.out index 687f17e..43cf190 100644 --- a/ql/src/test/results/clientpositive/avro_compression_enabled_native.q.out +++ b/ql/src/test/results/clientpositive/avro_compression_enabled_native.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 ( +PREHOOK: query: CREATE TABLE doctors4 ( number int, first_name string, last_name string, @@ -9,9 +7,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors4 -POSTHOOK: query: -- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 ( +POSTHOOK: query: CREATE TABLE doctors4 ( number int, first_name string, last_name string, diff --git a/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.out b/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.out index 2d983f1..cfd37de 100644 --- a/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.out +++ b/ql/src/test/results/clientpositive/avro_deserialize_map_null.q.out @@ -1,20 +1,6 @@ -PREHOOK: query: -- These test attempts to deserialize an Avro file that contains map null values, and the file schema --- vs record schema have the null values in different positions --- i.e. 
--- fileSchema = [{ "type" : "map", "values" : ["string","null"]}, "null"] --- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}] - - -DROP TABLE IF EXISTS avro_table +PREHOOK: query: DROP TABLE IF EXISTS avro_table PREHOOK: type: DROPTABLE -POSTHOOK: query: -- These test attempts to deserialize an Avro file that contains map null values, and the file schema --- vs record schema have the null values in different positions --- i.e. --- fileSchema = [{ "type" : "map", "values" : ["string","null"]}, "null"] --- recordSchema = ["null", { "type" : "map", "values" : ["string","null"]}] - - -DROP TABLE IF EXISTS avro_table +POSTHOOK: query: DROP TABLE IF EXISTS avro_table POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE avro_table (avreau_col_1 map) STORED AS AVRO PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out b/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out index e7771f6..ab6da06 100644 --- a/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out +++ b/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new fields in schema get propagated to table scans -CREATE TABLE doctors_with_new_field +PREHOOK: query: CREATE TABLE doctors_with_new_field ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -38,10 +35,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors_with_new_field -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new fields in schema get propagated to table scans -CREATE TABLE doctors_with_new_field +POSTHOOK: query: CREATE TABLE doctors_with_new_field ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS diff --git a/ql/src/test/results/clientpositive/avro_joins.q.out b/ql/src/test/results/clientpositive/avro_joins.q.out index b9f233b..24a14d5 
100644 --- a/ql/src/test/results/clientpositive/avro_joins.q.out +++ b/ql/src/test/results/clientpositive/avro_joins.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 +PREHOOK: query: CREATE TABLE doctors4 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -39,11 +35,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors4 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 +POSTHOOK: query: CREATE TABLE doctors4 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS diff --git a/ql/src/test/results/clientpositive/avro_joins_native.q.out b/ql/src/test/results/clientpositive/avro_joins_native.q.out index 09a1549..b2ece57 100644 --- a/ql/src/test/results/clientpositive/avro_joins_native.q.out +++ b/ql/src/test/results/clientpositive/avro_joins_native.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 ( +PREHOOK: query: CREATE TABLE doctors4 ( number int COMMENT "Order of playing the role", first_name string COMMENT "first name of actor playing role", last_name string COMMENT "last name of actor playing role") @@ -10,11 +6,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors4 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 ( +POSTHOOK: query: CREATE TABLE doctors4 ( number int COMMENT "Order of playing the role", first_name string COMMENT "first name of actor playing role", last_name string COMMENT "last name of actor playing role") diff --git 
a/ql/src/test/results/clientpositive/avro_native.q.out b/ql/src/test/results/clientpositive/avro_native.q.out index 03e64c4..17d8280 100644 --- a/ql/src/test/results/clientpositive/avro_native.q.out +++ b/ql/src/test/results/clientpositive/avro_native.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors ( +PREHOOK: query: CREATE TABLE doctors ( number int, first_name string, last_name string) @@ -9,10 +6,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors ( +POSTHOOK: query: CREATE TABLE doctors ( number int, first_name string, last_name string) diff --git a/ql/src/test/results/clientpositive/avro_nullable_fields.q.out b/ql/src/test/results/clientpositive/avro_nullable_fields.q.out index 2272b34..2d78274 100644 --- a/ql/src/test/results/clientpositive/avro_nullable_fields.q.out +++ b/ql/src/test/results/clientpositive/avro_nullable_fields.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- Verify that nullable fields properly work - - -CREATE TABLE test_serializer(string1 STRING, +PREHOOK: query: CREATE TABLE test_serializer(string1 STRING, int1 INT, tinyint1 TINYINT, smallint1 SMALLINT, @@ -21,10 +18,7 @@ CREATE TABLE test_serializer(string1 STRING, PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_serializer -POSTHOOK: query: -- Verify that nullable fields properly work - - -CREATE TABLE test_serializer(string1 STRING, +POSTHOOK: query: CREATE TABLE test_serializer(string1 STRING, int1 INT, tinyint1 TINYINT, smallint1 SMALLINT, diff --git a/ql/src/test/results/clientpositive/avro_nullable_union.q.out b/ql/src/test/results/clientpositive/avro_nullable_union.q.out index b80182e..d97aacc 100644 --- a/ql/src/test/results/clientpositive/avro_nullable_union.q.out +++ 
b/ql/src/test/results/clientpositive/avro_nullable_union.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- verify that we can write a nullable union type column with both nullable and non-nullable data - -DROP TABLE IF EXISTS union_nullable_test_text +PREHOOK: query: DROP TABLE IF EXISTS union_nullable_test_text PREHOOK: type: DROPTABLE -POSTHOOK: query: -- verify that we can write a nullable union type column with both nullable and non-nullable data - -DROP TABLE IF EXISTS union_nullable_test_text +POSTHOOK: query: DROP TABLE IF EXISTS union_nullable_test_text POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE union_nullable_test_text (id int, value uniontype) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ':' STORED AS textfile PREHOOK: type: CREATETABLE @@ -71,13 +67,9 @@ POSTHOOK: query: DROP TABLE union_nullable_test_text POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@union_nullable_test_text POSTHOOK: Output: default@union_nullable_test_text -PREHOOK: query: -- verify that we can write a non nullable union type column with non-nullable data - -DROP TABLE IF EXISTS union_non_nullable_test_text +PREHOOK: query: DROP TABLE IF EXISTS union_non_nullable_test_text PREHOOK: type: DROPTABLE -POSTHOOK: query: -- verify that we can write a non nullable union type column with non-nullable data - -DROP TABLE IF EXISTS union_non_nullable_test_text +POSTHOOK: query: DROP TABLE IF EXISTS union_non_nullable_test_text POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE union_non_nullable_test_text (id int, value uniontype) ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t' COLLECTION ITEMS TERMINATED BY ':' STORED AS textfile PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/avro_partitioned.q.out b/ql/src/test/results/clientpositive/avro_partitioned.q.out index bd45978..b85b4ed 100644 --- a/ql/src/test/results/clientpositive/avro_partitioned.q.out +++ b/ql/src/test/results/clientpositive/avro_partitioned.q.out @@ -1,6 
+1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- Verify that table scans work with partitioned Avro tables -CREATE TABLE episodes +PREHOOK: query: CREATE TABLE episodes ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -31,9 +29,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@episodes -POSTHOOK: query: -- SORT_QUERY_RESULTS --- Verify that table scans work with partitioned Avro tables -CREATE TABLE episodes +POSTHOOK: query: CREATE TABLE episodes ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -186,8 +182,7 @@ POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9 Rose 26 March 2005 9 9 The Doctor's Wife 14 May 2011 11 11 The Eleventh Hour 3 April 2010 11 11 -PREHOOK: query: -- Verify that Fetch works in addition to Map -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 +PREHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned PREHOOK: Input: default@episodes_partitioned@doctor_pt=1 @@ -198,8 +193,7 @@ PREHOOK: Input: default@episodes_partitioned@doctor_pt=5 PREHOOK: Input: default@episodes_partitioned@doctor_pt=6 PREHOOK: Input: default@episodes_partitioned@doctor_pt=9 #### A masked pattern was here #### -POSTHOOK: query: -- Verify that Fetch works in addition to Map -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 +POSTHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1 @@ -215,35 +209,29 @@ Horror of Fang Rock 3 September 1977 4 4 Rose 26 March 2005 9 9 The Doctor's Wife 14 May 2011 11 11 The Eleventh Hour 3 April 2010 11 11 -PREHOOK: query: -- Fetch w/filter to specific partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 +PREHOOK: query: SELECT * FROM 
episodes_partitioned WHERE doctor_pt = 6 PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned PREHOOK: Input: default@episodes_partitioned@doctor_pt=6 #### A masked pattern was here #### -POSTHOOK: query: -- Fetch w/filter to specific partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 +POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6 #### A masked pattern was here #### The Mysterious Planet 6 September 1986 6 6 -PREHOOK: query: -- Fetch w/non-existent partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 +PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned #### A masked pattern was here #### -POSTHOOK: query: -- Fetch w/non-existent partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 +POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned #### A masked pattern was here #### -PREHOOK: query: -- Alter table add an empty partition -ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7) +PREHOOK: query: ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7) PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@episodes_partitioned -POSTHOOK: query: -- Alter table add an empty partition -ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7) +POSTHOOK: query: ALTER TABLE episodes_partitioned ADD PARTITION (doctor_pt=7) POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: default@episodes_partitioned POSTHOOK: Output: default@episodes_partitioned@doctor_pt=7 @@ -256,11 +244,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned #### A masked pattern was here #### 8 -PREHOOK: query: -- Verify that reading from an Avro partition 
works --- even if it has an old schema relative to the current table level schema - --- Create table and store schema in SERDEPROPERTIES -CREATE TABLE episodes_partitioned_serdeproperties +PREHOOK: query: CREATE TABLE episodes_partitioned_serdeproperties PARTITIONED BY (doctor_pt INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' @@ -292,11 +276,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@episodes_partitioned_serdeproperties -POSTHOOK: query: -- Verify that reading from an Avro partition works --- even if it has an old schema relative to the current table level schema - --- Create table and store schema in SERDEPROPERTIES -CREATE TABLE episodes_partitioned_serdeproperties +POSTHOOK: query: CREATE TABLE episodes_partitioned_serdeproperties PARTITIONED BY (doctor_pt INT) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' @@ -328,13 +308,11 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@episodes_partitioned_serdeproperties -PREHOOK: query: -- Insert data into a partition -INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes +PREHOOK: query: INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes PREHOOK: type: QUERY PREHOOK: Input: default@episodes PREHOOK: Output: default@episodes_partitioned_serdeproperties -POSTHOOK: query: -- Insert data into a partition -INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, doctor as doctor_pt FROM episodes +POSTHOOK: query: INSERT INTO TABLE episodes_partitioned_serdeproperties PARTITION (doctor_pt) SELECT title, air_date, doctor, 
doctor as doctor_pt FROM episodes POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=1 @@ -365,8 +343,7 @@ POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).t POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ] POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ] POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ] -PREHOOK: query: -- Evolve the table schema by adding new array field "cast_and_crew" -ALTER TABLE episodes_partitioned_serdeproperties +PREHOOK: query: ALTER TABLE episodes_partitioned_serdeproperties SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH SERDEPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", @@ -398,8 +375,7 @@ WITH SERDEPROPERTIES ('avro.schema.literal'='{ PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: default@episodes_partitioned_serdeproperties PREHOOK: Output: default@episodes_partitioned_serdeproperties -POSTHOOK: query: -- Evolve the table schema by adding new array field "cast_and_crew" -ALTER TABLE episodes_partitioned_serdeproperties +POSTHOOK: query: ALTER TABLE episodes_partitioned_serdeproperties SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' WITH SERDEPROPERTIES ('avro.schema.literal'='{ "namespace": "testing.hive.avro.serde", @@ -431,8 +407,7 @@ WITH SERDEPROPERTIES ('avro.schema.literal'='{ POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: default@episodes_partitioned_serdeproperties POSTHOOK: Output: default@episodes_partitioned_serdeproperties -PREHOOK: query: -- Try 
selecting from the evolved table -SELECT * FROM episodes_partitioned_serdeproperties +PREHOOK: query: SELECT * FROM episodes_partitioned_serdeproperties PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned_serdeproperties PREHOOK: Input: default@episodes_partitioned_serdeproperties@doctor_pt=1 @@ -443,8 +418,7 @@ PREHOOK: Input: default@episodes_partitioned_serdeproperties@doctor_pt=5 PREHOOK: Input: default@episodes_partitioned_serdeproperties@doctor_pt=6 PREHOOK: Input: default@episodes_partitioned_serdeproperties@doctor_pt=9 #### A masked pattern was here #### -POSTHOOK: query: -- Try selecting from the evolved table -SELECT * FROM episodes_partitioned_serdeproperties +POSTHOOK: query: SELECT * FROM episodes_partitioned_serdeproperties POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned_serdeproperties POSTHOOK: Input: default@episodes_partitioned_serdeproperties@doctor_pt=1 diff --git a/ql/src/test/results/clientpositive/avro_partitioned_native.q.out b/ql/src/test/results/clientpositive/avro_partitioned_native.q.out index ea11b4c..0c005e4 100644 --- a/ql/src/test/results/clientpositive/avro_partitioned_native.q.out +++ b/ql/src/test/results/clientpositive/avro_partitioned_native.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- Verify that table scans work with partitioned Avro tables -CREATE TABLE episodes ( +PREHOOK: query: CREATE TABLE episodes ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") @@ -8,9 +6,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@episodes -POSTHOOK: query: -- SORT_QUERY_RESULTS --- Verify that table scans work with partitioned Avro tables -CREATE TABLE episodes ( +POSTHOOK: query: CREATE TABLE episodes ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") @@ 
-96,8 +92,7 @@ POSTHOOK: Input: default@episodes_partitioned@doctor_pt=9 Rose 26 March 2005 9 9 The Doctor's Wife 14 May 2011 11 11 The Eleventh Hour 3 April 2010 11 11 -PREHOOK: query: -- Verify that Fetch works in addition to Map -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 +PREHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned PREHOOK: Input: default@episodes_partitioned@doctor_pt=1 @@ -108,8 +103,7 @@ PREHOOK: Input: default@episodes_partitioned@doctor_pt=5 PREHOOK: Input: default@episodes_partitioned@doctor_pt=6 PREHOOK: Input: default@episodes_partitioned@doctor_pt=9 #### A masked pattern was here #### -POSTHOOK: query: -- Verify that Fetch works in addition to Map -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 +POSTHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1 @@ -125,26 +119,22 @@ Horror of Fang Rock 3 September 1977 4 4 Rose 26 March 2005 9 9 The Doctor's Wife 14 May 2011 11 11 The Eleventh Hour 3 April 2010 11 11 -PREHOOK: query: -- Fetch w/filter to specific partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 +PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned PREHOOK: Input: default@episodes_partitioned@doctor_pt=6 #### A masked pattern was here #### -POSTHOOK: query: -- Fetch w/filter to specific partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 +POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6 #### A masked pattern was here #### The Mysterious Planet 6 September 1986 6 6 -PREHOOK: query: -- Fetch w/non-existent partition 
-SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 +PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned #### A masked pattern was here #### -POSTHOOK: query: -- Fetch w/non-existent partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 +POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/avro_sanity_test.q.out b/ql/src/test/results/clientpositive/avro_sanity_test.q.out index 250e661..fa4e921 100644 --- a/ql/src/test/results/clientpositive/avro_sanity_test.q.out +++ b/ql/src/test/results/clientpositive/avro_sanity_test.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors +PREHOOK: query: CREATE TABLE doctors ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -32,10 +29,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that we can actually read avro files -CREATE TABLE doctors +POSTHOOK: query: CREATE TABLE doctors ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS diff --git a/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out b/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out index 852a679..c92c751 100644 --- a/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out +++ b/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- Verify that table scans work with partitioned Avro tables -CREATE TABLE episodes ( +PREHOOK: query: CREATE TABLE episodes ( title string COMMENT 
"episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") @@ -8,9 +6,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@episodes -POSTHOOK: query: -- SORT_QUERY_RESULTS --- Verify that table scans work with partitioned Avro tables -CREATE TABLE episodes ( +POSTHOOK: query: CREATE TABLE episodes ( title string COMMENT "episode title", air_date string COMMENT "initial date", doctor int COMMENT "main actor playing the Doctor in episode") @@ -266,8 +262,7 @@ episodes_partitioned.title episodes_partitioned.air_date episodes_partitioned.do Rose 26 March 2005 9 0 9 The Doctor's Wife 14 May 2011 11 0 11 The Eleventh Hour 3 April 2010 11 0 11 -PREHOOK: query: -- Verify that Fetch works in addition to Map -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 +PREHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned PREHOOK: Input: default@episodes_partitioned@doctor_pt=1 @@ -278,8 +273,7 @@ PREHOOK: Input: default@episodes_partitioned@doctor_pt=5 PREHOOK: Input: default@episodes_partitioned@doctor_pt=6 PREHOOK: Input: default@episodes_partitioned@doctor_pt=9 #### A masked pattern was here #### -POSTHOOK: query: -- Verify that Fetch works in addition to Map -SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 +POSTHOOK: query: SELECT * FROM episodes_partitioned ORDER BY air_date LIMIT 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned POSTHOOK: Input: default@episodes_partitioned@doctor_pt=1 @@ -296,27 +290,23 @@ Horror of Fang Rock 3 September 1977 4 0 4 Rose 26 March 2005 9 0 9 The Doctor's Wife 14 May 2011 11 0 11 The Eleventh Hour 3 April 2010 11 0 11 -PREHOOK: query: -- Fetch w/filter to specific partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 +PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 
PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned PREHOOK: Input: default@episodes_partitioned@doctor_pt=6 #### A masked pattern was here #### -POSTHOOK: query: -- Fetch w/filter to specific partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 +POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 6 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned POSTHOOK: Input: default@episodes_partitioned@doctor_pt=6 #### A masked pattern was here #### episodes_partitioned.title episodes_partitioned.air_date episodes_partitioned.doctor episodes_partitioned.value episodes_partitioned.doctor_pt The Mysterious Planet 6 September 1986 6 0 6 -PREHOOK: query: -- Fetch w/non-existent partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 +PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 PREHOOK: type: QUERY PREHOOK: Input: default@episodes_partitioned #### A masked pattern was here #### -POSTHOOK: query: -- Fetch w/non-existent partition -SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 +POSTHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt = 7 LIMIT 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@episodes_partitioned #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/avro_timestamp.q.out b/ql/src/test/results/clientpositive/avro_timestamp.q.out index 868807a..ca18fd9 100644 --- a/ql/src/test/results/clientpositive/avro_timestamp.q.out +++ b/ql/src/test/results/clientpositive/avro_timestamp.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows. --- EXCLUDE_OS_WINDOWS - -DROP TABLE avro_timestamp_staging +PREHOOK: query: DROP TABLE avro_timestamp_staging PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows. 
--- EXCLUDE_OS_WINDOWS - -DROP TABLE avro_timestamp_staging +POSTHOOK: query: DROP TABLE avro_timestamp_staging POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE avro_timestamp PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/avro_type_evolution.q.out b/ql/src/test/results/clientpositive/avro_type_evolution.q.out index 71ec99b..73b735d 100644 --- a/ql/src/test/results/clientpositive/avro_type_evolution.q.out +++ b/ql/src/test/results/clientpositive/avro_type_evolution.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- File Schema { "name" : "val", "type" : [ "null", "int" ] } --- Record Schema { "name" : "val", "type" : [ "long", "null" ] } - -DROP TABLE IF EXISTS avro_type_evolution +PREHOOK: query: DROP TABLE IF EXISTS avro_type_evolution PREHOOK: type: DROPTABLE -POSTHOOK: query: -- File Schema { "name" : "val", "type" : [ "null", "int" ] } --- Record Schema { "name" : "val", "type" : [ "long", "null" ] } - -DROP TABLE IF EXISTS avro_type_evolution +POSTHOOK: query: DROP TABLE IF EXISTS avro_type_evolution POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE avro_type_evolution (val bigint) STORED AS AVRO TBLPROPERTIES ( diff --git a/ql/src/test/results/clientpositive/ba_table1.q.out b/ql/src/test/results/clientpositive/ba_table1.q.out index 14862a7..6643bb1 100644 --- a/ql/src/test/results/clientpositive/ba_table1.q.out +++ b/ql/src/test/results/clientpositive/ba_table1.q.out @@ -1,20 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table ba_test +PREHOOK: query: drop table ba_test PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table ba_test +POSTHOOK: query: drop table ba_test POSTHOOK: type: DROPTABLE -PREHOOK: query: -- This query tests a) binary type works correctly in grammar b) string can be cast into binary c) binary can be stored in a table d) binary data can be loaded back again and queried d) order-by on a binary key - -create table ba_test (ba_key binary, ba_val binary) +PREHOOK: query: create 
table ba_test (ba_key binary, ba_val binary) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ba_test -POSTHOOK: query: -- This query tests a) binary type works correctly in grammar b) string can be cast into binary c) binary can be stored in a table d) binary data can be loaded back again and queried d) order-by on a binary key - -create table ba_test (ba_key binary, ba_val binary) +POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@ba_test diff --git a/ql/src/test/results/clientpositive/ba_table2.q.out b/ql/src/test/results/clientpositive/ba_table2.q.out index 4159648..8694ac8 100644 --- a/ql/src/test/results/clientpositive/ba_table2.q.out +++ b/ql/src/test/results/clientpositive/ba_table2.q.out @@ -1,20 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table ba_test +PREHOOK: query: drop table ba_test PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table ba_test +POSTHOOK: query: drop table ba_test POSTHOOK: type: DROPTABLE -PREHOOK: query: -- All the test in ba_test1.q + using LazyBinarySerde instead of LazySimpleSerde - -create table ba_test (ba_key binary, ba_val binary) +PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ba_test -POSTHOOK: query: -- All the test in ba_test1.q + using LazyBinarySerde instead of LazySimpleSerde - -create table ba_test (ba_key binary, ba_val binary) +POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@ba_test diff --git a/ql/src/test/results/clientpositive/ba_table3.q.out b/ql/src/test/results/clientpositive/ba_table3.q.out index e1089d3..2ddc78b 100644 --- a/ql/src/test/results/clientpositive/ba_table3.q.out +++ 
b/ql/src/test/results/clientpositive/ba_table3.q.out @@ -2,15 +2,11 @@ PREHOOK: query: drop table ba_test PREHOOK: type: DROPTABLE POSTHOOK: query: drop table ba_test POSTHOOK: type: DROPTABLE -PREHOOK: query: -- All the tests of ba_table1.q + test for a group-by and aggregation on a binary key. - -create table ba_test (ba_key binary, ba_val binary) +PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ba_test -POSTHOOK: query: -- All the tests of ba_table1.q + test for a group-by and aggregation on a binary key. - -create table ba_test (ba_key binary, ba_val binary) +POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@ba_test diff --git a/ql/src/test/results/clientpositive/ba_table_udfs.q.out b/ql/src/test/results/clientpositive/ba_table_udfs.q.out index 26eb165..1682631 100644 --- a/ql/src/test/results/clientpositive/ba_table_udfs.q.out +++ b/ql/src/test/results/clientpositive/ba_table_udfs.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -USE default +PREHOOK: query: USE default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default -POSTHOOK: query: -- SORT_QUERY_RESULTS - -USE default +POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default PREHOOK: query: CREATE TABLE dest1(bytes1 BINARY, @@ -43,22 +39,18 @@ POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.bytes1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: dest1.bytes2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest1.string SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: --Add in a null row for good measure -INSERT INTO TABLE dest1 SELECT NULL, NULL, NULL FROM dest1 LIMIT 1 +PREHOOK: query: 
INSERT INTO TABLE dest1 SELECT NULL, NULL, NULL FROM dest1 LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 PREHOOK: Output: default@dest1 -POSTHOOK: query: --Add in a null row for good measure -INSERT INTO TABLE dest1 SELECT NULL, NULL, NULL FROM dest1 LIMIT 1 +POSTHOOK: query: INSERT INTO TABLE dest1 SELECT NULL, NULL, NULL FROM dest1 LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.bytes1 EXPRESSION [] POSTHOOK: Lineage: dest1.bytes2 EXPRESSION [] POSTHOOK: Lineage: dest1.string SIMPLE [] -PREHOOK: query: -- this query tests all the udfs provided to work with binary types - -SELECT +PREHOOK: query: SELECT bytes1, bytes2, string, @@ -77,9 +69,7 @@ FROM dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: query: -- this query tests all the udfs provided to work with binary types - -SELECT +POSTHOOK: query: SELECT bytes1, bytes2, string, diff --git a/ql/src/test/results/clientpositive/ba_table_union.q.out b/ql/src/test/results/clientpositive/ba_table_union.q.out index 53f16b6..623d131 100644 --- a/ql/src/test/results/clientpositive/ba_table_union.q.out +++ b/ql/src/test/results/clientpositive/ba_table_union.q.out @@ -2,13 +2,11 @@ PREHOOK: query: drop table ba_test PREHOOK: type: DROPTABLE POSTHOOK: query: drop table ba_test POSTHOOK: type: DROPTABLE -PREHOOK: query: -- this query tests ba_table1.q + nested queries with multiple operations on binary data types + union on binary types -create table ba_test (ba_key binary, ba_val binary) +PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ba_test -POSTHOOK: query: -- this query tests ba_table1.q + nested queries with multiple operations on binary data types + union on binary types -create table ba_test (ba_key binary, ba_val binary) +POSTHOOK: query: create table ba_test (ba_key binary, 
ba_val binary) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@ba_test diff --git a/ql/src/test/results/clientpositive/binary_output_format.q.out b/ql/src/test/results/clientpositive/binary_output_format.q.out index f3c624c..addbea1 100644 --- a/ql/src/test/results/clientpositive/binary_output_format.q.out +++ b/ql/src/test/results/clientpositive/binary_output_format.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Create a table with binary output format -CREATE TABLE dest1(mydata STRING) +PREHOOK: query: CREATE TABLE dest1(mydata STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( @@ -11,8 +10,7 @@ STORED AS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- Create a table with binary output format -CREATE TABLE dest1(mydata STRING) +POSTHOOK: query: CREATE TABLE dest1(mydata STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' WITH SERDEPROPERTIES ( @@ -24,8 +22,7 @@ STORED AS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- Insert into that table using transform -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT TRANSFORM(*) USING 'cat' @@ -38,8 +35,7 @@ SELECT TRANSFORM(*) RECORDREADER 'org.apache.hadoop.hive.ql.exec.BinaryRecordReader' FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- Insert into that table using transform -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT TRANSFORM(*) USING 'cat' @@ -420,13 +416,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.mydata SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Test the result -SELECT * FROM dest1 +PREHOOK: query: SELECT * FROM 
dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: query: -- Test the result -SELECT * FROM dest1 +POSTHOOK: query: SELECT * FROM dest1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out b/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out index 24c5edc..ba692ad 100644 --- a/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out +++ b/ql/src/test/results/clientpositive/binary_table_bincolserde.q.out @@ -2,15 +2,11 @@ PREHOOK: query: drop table ba_test PREHOOK: type: DROPTABLE POSTHOOK: query: drop table ba_test POSTHOOK: type: DROPTABLE -PREHOOK: query: -- Tests everything in binary_table_colserde.q + uses LazyBinaryColumnarSerde - -create table ba_test (ba_key binary, ba_val binary) stored as rcfile +PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ba_test -POSTHOOK: query: -- Tests everything in binary_table_colserde.q + uses LazyBinaryColumnarSerde - -create table ba_test (ba_key binary, ba_val binary) stored as rcfile +POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@ba_test diff --git a/ql/src/test/results/clientpositive/binary_table_colserde.q.out b/ql/src/test/results/clientpositive/binary_table_colserde.q.out index 3a026f3..5fdddc9 100644 --- a/ql/src/test/results/clientpositive/binary_table_colserde.q.out +++ b/ql/src/test/results/clientpositive/binary_table_colserde.q.out @@ -2,15 +2,11 @@ PREHOOK: query: drop table ba_test PREHOOK: type: DROPTABLE POSTHOOK: query: drop table ba_test POSTHOOK: type: DROPTABLE -PREHOOK: query: -- Everything in ba_table1.q + columnar serde in RCFILE. 
- -create table ba_test (ba_key binary, ba_val binary) stored as rcfile +PREHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ba_test -POSTHOOK: query: -- Everything in ba_table1.q + columnar serde in RCFILE. - -create table ba_test (ba_key binary, ba_val binary) stored as rcfile +POSTHOOK: query: create table ba_test (ba_key binary, ba_val binary) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@ba_test diff --git a/ql/src/test/results/clientpositive/bucket1.q.out b/ql/src/test/results/clientpositive/bucket1.q.out index 92ecd67..48e5126 100644 --- a/ql/src/test/results/clientpositive/bucket1.q.out +++ b/ql/src/test/results/clientpositive/bucket1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket1_1(key int, value string) CLUSTERED BY (key) INTO 100 BUCKETS +PREHOOK: query: CREATE TABLE bucket1_1(key int, value string) CLUSTERED BY (key) INTO 100 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket1_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket1_1(key int, value string) CLUSTERED BY (key) INTO 100 BUCKETS +POSTHOOK: query: CREATE TABLE bucket1_1(key int, value string) CLUSTERED BY (key) INTO 100 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket1_1 diff --git a/ql/src/test/results/clientpositive/bucket2.q.out b/ql/src/test/results/clientpositive/bucket2.q.out index b849ed3..55e2d37 100644 --- a/ql/src/test/results/clientpositive/bucket2.q.out +++ b/ql/src/test/results/clientpositive/bucket2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: 
CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket2_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket2_1 diff --git a/ql/src/test/results/clientpositive/bucket3.q.out b/ql/src/test/results/clientpositive/bucket3.q.out index fa8b0f9..95f2f86 100644 --- a/ql/src/test/results/clientpositive/bucket3.q.out +++ b/ql/src/test/results/clientpositive/bucket3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket3_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket3_1 diff --git a/ql/src/test/results/clientpositive/bucket_map_join_1.q.out b/ql/src/test/results/clientpositive/bucket_map_join_1.q.out index 418e5aa..70d2513 100644 --- a/ql/src/test/results/clientpositive/bucket_map_join_1.q.out +++ b/ql/src/test/results/clientpositive/bucket_map_join_1.q.out @@ -42,18 +42,10 @@ POSTHOOK: query: load data local inpath '../../data/files/SortCol2Col1.txt' over POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@table2 -PREHOOK: query: -- The tables are bucketed in same columns in different order, --- but sorted in 
different column orders --- Neither bucketed map-join, nor sort-merge join should be performed - -explain extended +PREHOOK: query: explain extended select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value PREHOOK: type: QUERY -POSTHOOK: query: -- The tables are bucketed in same columns in different order, --- but sorted in different column orders --- Neither bucketed map-join, nor sort-merge join should be performed - -explain extended +POSTHOOK: query: explain extended select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/bucket_map_join_2.q.out b/ql/src/test/results/clientpositive/bucket_map_join_2.q.out index 01ad865..f7f921e 100644 --- a/ql/src/test/results/clientpositive/bucket_map_join_2.q.out +++ b/ql/src/test/results/clientpositive/bucket_map_join_2.q.out @@ -42,18 +42,10 @@ POSTHOOK: query: load data local inpath '../../data/files/SortCol2Col1.txt' over POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@table2 -PREHOOK: query: -- The tables are bucketed in same columns in different order, --- but sorted in different column orders --- Neither bucketed map-join, nor sort-merge join should be performed - -explain extended +PREHOOK: query: explain extended select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value PREHOOK: type: QUERY -POSTHOOK: query: -- The tables are bucketed in same columns in different order, --- but sorted in different column orders --- Neither bucketed map-join, nor sort-merge join should be performed - -explain extended +POSTHOOK: query: explain extended select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out 
b/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out index 662ad0e..72398af 100644 --- a/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out +++ b/ql/src/test/results/clientpositive/bucket_map_join_spark4.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 diff --git a/ql/src/test/results/clientpositive/bucketcontext_1.q.out b/ql/src/test/results/clientpositive/bucketcontext_1.q.out index f8b3020..d7e50c7 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_1.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_1.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) 
partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small diff --git a/ql/src/test/results/clientpositive/bucketcontext_2.q.out b/ql/src/test/results/clientpositive/bucketcontext_2.q.out index 9a39392..1a3e4d2 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_2.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small diff --git a/ql/src/test/results/clientpositive/bucketcontext_3.q.out b/ql/src/test/results/clientpositive/bucketcontext_3.q.out index b1b8fcc..c766c9f 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_3.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_3.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: 
query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small diff --git a/ql/src/test/results/clientpositive/bucketcontext_4.q.out b/ql/src/test/results/clientpositive/bucketcontext_4.q.out index 95d1a6b..eda96cf 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_4.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_4.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@bucket_small diff --git a/ql/src/test/results/clientpositive/bucketcontext_5.q.out b/ql/src/test/results/clientpositive/bucketcontext_5.q.out index 39286fb..6fa8e65 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_5.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_5.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket -CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket -CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small diff --git a/ql/src/test/results/clientpositive/bucketcontext_6.q.out b/ql/src/test/results/clientpositive/bucketcontext_6.q.out index bb7ffcb..60bf036 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_6.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_6.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small no part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small no part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE 
bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small diff --git a/ql/src/test/results/clientpositive/bucketcontext_7.q.out b/ql/src/test/results/clientpositive/bucketcontext_7.q.out index 16e62d1..2218cfc 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_7.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_7.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small diff --git a/ql/src/test/results/clientpositive/bucketcontext_8.q.out b/ql/src/test/results/clientpositive/bucketcontext_8.q.out index 7a17693..0ad478c 100644 --- a/ql/src/test/results/clientpositive/bucketcontext_8.q.out +++ b/ql/src/test/results/clientpositive/bucketcontext_8.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket -CREATE 
TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small diff --git a/ql/src/test/results/clientpositive/bucketmapjoin10.q.out b/ql/src/test/results/clientpositive/bucketmapjoin10.q.out index 0176c2a..a0db9c4 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin10.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin10.q.out @@ -126,16 +126,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The table bucketing metadata matches but the partition metadata does not, bucket map join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY -POSTHOOK: query: -- The table bucketing metadata matches but the partition metadata does not, bucket map join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ 
count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL diff --git a/ql/src/test/results/clientpositive/bucketmapjoin11.q.out b/ql/src/test/results/clientpositive/bucketmapjoin11.q.out index d8e7b66..c0c425f 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin11.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin11.q.out @@ -134,18 +134,12 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=2 -PREHOOK: query: -- The table and partition bucketing metadata doesn't match but the bucket numbers of all partitions is --- a power of 2 and the bucketing columns match so bucket map join should be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY -POSTHOOK: query: -- The table and partition bucketing metadata doesn't match but the bucket numbers of all partitions is --- a power of 2 and the bucketing columns match so bucket map join should be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL diff --git a/ql/src/test/results/clientpositive/bucketmapjoin12.q.out b/ql/src/test/results/clientpositive/bucketmapjoin12.q.out index f53af00..73a8ed9 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin12.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin12.q.out @@ -95,16 +95,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_3 CLUSTERED BY (key) INTO 2 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_3 POSTHOOK: Output: 
default@srcbucket_mapjoin_part_3 -PREHOOK: query: -- The partition bucketing metadata match but one table is not bucketed, bucket map join should still be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The partition bucketing metadata match but one table is not bucketed, bucket map join should still be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' @@ -328,16 +324,12 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 #### A masked pattern was here #### 464 -PREHOOK: query: -- The table bucketing metadata match but one partition is not bucketed, bucket map join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_3 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The table bucketing metadata match but one partition is not bucketed, bucket map join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_3 b ON a.key = b.key AND a.part = '1' and b.part = '1' diff --git a/ql/src/test/results/clientpositive/bucketmapjoin13.q.out b/ql/src/test/results/clientpositive/bucketmapjoin13.q.out index 55d6ffd..fcf8cbf 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin13.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin13.q.out @@ -8,14 +8,12 @@ CLUSTERED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@srcbucket_mapjoin_part_1 -PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -30,14 +28,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 2 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_1 POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -54,30 +50,24 @@ CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_2 is bucketed by 'key' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +PREHOOK: query: INSERT OVERWRITE 
TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_2 is bucketed by 'key' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value' --- and it is also being joined. So, bucketed map-join cannot be performed -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value' --- and it is also being joined. So, bucketed map-join cannot be performed -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key @@ -346,16 +336,12 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 #### A masked pattern was here #### 2056 -PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. 
So, bucketed map-join can be performed -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key and a.part = '2' PREHOOK: type: QUERY -POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key and a.part = '2' @@ -590,16 +576,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 drop partition (part = '1' POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@srcbucket_mapjoin_part_1 POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key @@ -834,18 +816,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (value) INTO POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_1 POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. 
So, bucketed map-join can be performed --- The fact that the table is being bucketed by 'value' does not matter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed --- The fact that the table is being bucketed by 'value' does not matter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key diff --git a/ql/src/test/results/clientpositive/bucketmapjoin8.q.out b/ql/src/test/results/clientpositive/bucketmapjoin8.q.out index 5ce0ae3..3cbd2bb 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin8.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin8.q.out @@ -60,16 +60,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The partition bucketing metadata match but the tables have different numbers of buckets, bucket map join should still be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The partition bucketing metadata match but the tables have different numbers of buckets, bucket map join should still be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' @@ -302,16 +298,12 @@ POSTHOOK: query: ALTER TABLE 
srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The partition bucketing metadata match but the tables are bucketed on different columns, bucket map join should still be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The partition bucketing metadata match but the tables are bucketed on different columns, bucket map join should still be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' diff --git a/ql/src/test/results/clientpositive/bucketmapjoin9.q.out b/ql/src/test/results/clientpositive/bucketmapjoin9.q.out index 9c4a0ca..ea4b2d4 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin9.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin9.q.out @@ -68,16 +68,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The table bucketing metadata matches but the partitions have different numbers of buckets, bucket map join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The table bucketing metadata matches but the partitions have different numbers of buckets, bucket map join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN 
EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' @@ -335,16 +331,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The table bucketing metadata matches but the partitions are bucketed on different columns, bucket map join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' AND b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The table bucketing metadata matches but the partitions are bucketed on different columns, bucket map join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' AND b.part = '1' diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out index ed107a3..7114a76 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out @@ -142,11 +142,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@test4 -PREHOOK: query: -- should be allowed -explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value +PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value PREHOOK: type: QUERY -POSTHOOK: query: -- should be allowed -explain extended select /* + MAPJOIN(R) 
*/ * from test1 L join test1 R on L.key=R.key AND L.value=R.value +POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage @@ -436,11 +434,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- should not apply bucket mapjoin -explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key +PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key PREHOOK: type: QUERY -POSTHOOK: query: -- should not apply bucket mapjoin -explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key +POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out index 48de423..165f0dc 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_1.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,18 +30,14 
@@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1@ds=1 POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.key, x.value from ( SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' )x PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.key, x.value from ( @@ -246,16 +240,14 @@ POSTHOOK: Input: default@test_table2 POSTHOOK: Input: default@test_table2@ds=1 #### A masked pattern was here #### 253 -PREHOOK: query: -- it should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.key, concat(x.value, x.value) from ( SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' )x PREHOOK: type: QUERY -POSTHOOK: query: -- it should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.key, concat(x.value, x.value) from ( @@ -302,16 +294,14 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: -- it should be a map-reduce job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.key+x.key, x.value from ( SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' )x PREHOOK: type: QUERY -POSTHOOK: query: -- it should be a map-reduce job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT 
x.key+x.key, x.value from ( @@ -369,16 +359,14 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: -- it should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.k1, concat(x.v1, x.v1) from ( SELECT a.key as k1, a.value as v1 FROM test_table1 a WHERE a.ds = '1' )x PREHOOK: type: QUERY -POSTHOOK: query: -- it should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.k1, concat(x.v1, x.v1) from ( diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out index 8831080..11c7c39 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_3.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,18 +30,12 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1@ds=1 POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into 
the bucketed table by selecting from another bucketed table --- The bucketing positions dont match - although the actual bucketing do. --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.value, x.key from (SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- The bucketing positions dont match - although the actual bucketing do. --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.value, x.key from (SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x @@ -145,16 +137,12 @@ CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- The bucketing positions dont match - this should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.key, x.value from (SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- The bucketing positions dont match - this should be a map-reduce operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT x.key, x.value from (SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1')x diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out index 2b61b4c..1d794c3 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out +++ 
b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -54,17 +52,13 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the insert is happening on the bucketing position -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the insert is happening on the bucketing position -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -332,17 +326,13 @@ CLUSTERED BY 
(value) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation, since the insert is happening on a non-bucketing position -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation, since the insert is happening on a non-bucketing position -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a JOIN test_table2 b diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out index 44466b8..1e70105 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -54,17 +52,13 @@ 
POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation, since the sort-order does not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation, since the sort-order does not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -333,8 +327,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 5 val_5val_5 1 -PREHOOK: query: -- This should be a map-reduce job since the sort order does not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -343,8 +336,7 @@ JOIN (select key, value from test_table2 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-reduce job since the sort order does not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out index 6c4a077..f3d3006 100644 --- 
a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -54,17 +52,13 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, b.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, b.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -187,17 +181,13 @@ POSTHOOK: Input: default@test_table3@ds=1 5 5 
val_5val_5 1 5 5 val_5val_5 1 9 9 val_9val_9 1 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT b.key, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT b.key, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b diff --git a/ql/src/test/results/clientpositive/cast_to_int.q.out b/ql/src/test/results/clientpositive/cast_to_int.q.out index 398b13f..98e09c7 100644 --- a/ql/src/test/results/clientpositive/cast_to_int.q.out +++ b/ql/src/test/results/clientpositive/cast_to_int.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- cast string floats to integer types -select +PREHOOK: query: select cast('1' as float), cast('1.4' as float), cast('1.6' as float), @@ -31,8 +30,7 @@ from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- cast string floats to integer types -select +POSTHOOK: query: select cast('1' as float), cast('1.4' as float), cast('1.6' as float), diff --git a/ql/src/test/results/clientpositive/cbo_gby_empty.q.out b/ql/src/test/results/clientpositive/cbo_gby_empty.q.out index 68f0255..6970fd2 100644 --- a/ql/src/test/results/clientpositive/cbo_gby_empty.q.out +++ b/ql/src/test/results/clientpositive/cbo_gby_empty.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- 21. 
Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +PREHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +POSTHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out index f1ddd87..f260f03 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_annotate_stats_groupby.q.out @@ -1,24 +1,4 @@ -PREHOOK: query: -- hash aggregation is disabled - --- There are different cases for Group By depending on map/reduce side, hash aggregation, --- grouping sets and column stats. If we don't have column stats, we just assume hash --- aggregation is disabled. 
Following are the possible cases and rule for cardinality --- estimation - --- MAP SIDE: --- Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows --- Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet --- Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism) --- Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) --- Case 5: column stats, NO hash aggregation, NO grouping sets — numRows --- Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet - --- REDUCE SIDE: --- Case 7: NO column stats — numRows / 2 --- Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet) --- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) - -create table if not exists loc_staging ( +PREHOOK: query: create table if not exists loc_staging ( state string, locid int, zip bigint, @@ -27,27 +7,7 @@ create table if not exists loc_staging ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@loc_staging -POSTHOOK: query: -- hash aggregation is disabled - --- There are different cases for Group By depending on map/reduce side, hash aggregation, --- grouping sets and column stats. If we don't have column stats, we just assume hash --- aggregation is disabled. 
Following are the possible cases and rule for cardinality --- estimation - --- MAP SIDE: --- Case 1: NO column stats, NO hash aggregation, NO grouping sets — numRows --- Case 2: NO column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet --- Case 3: column stats, hash aggregation, NO grouping sets — Min(numRows / 2, ndvProduct * parallelism) --- Case 4: column stats, hash aggregation, grouping sets — Min((numRows * sizeOfGroupingSet) / 2, ndvProduct * parallelism * sizeOfGroupingSet) --- Case 5: column stats, NO hash aggregation, NO grouping sets — numRows --- Case 6: column stats, NO hash aggregation, grouping sets — numRows * sizeOfGroupingSet - --- REDUCE SIDE: --- Case 7: NO column stats — numRows / 2 --- Case 8: column stats, grouping sets — Min(numRows, ndvProduct * sizeOfGroupingSet) --- Case 9: column stats, NO grouping sets - Min(numRows, ndvProduct) - -create table if not exists loc_staging ( +POSTHOOK: query: create table if not exists loc_staging ( state string, locid int, zip bigint, @@ -92,11 +52,9 @@ POSTHOOK: Lineage: loc_orc.locid SIMPLE [(loc_staging)loc_staging.FieldSchema(na POSTHOOK: Lineage: loc_orc.state SIMPLE [(loc_staging)loc_staging.FieldSchema(name:state, type:string, comment:null), ] POSTHOOK: Lineage: loc_orc.year SIMPLE [(loc_staging)loc_staging.FieldSchema(name:year, type:int, comment:null), ] POSTHOOK: Lineage: loc_orc.zip SIMPLE [(loc_staging)loc_staging.FieldSchema(name:zip, type:bigint, comment:null), ] -PREHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +PREHOOK: query: explain select * from loc_orc PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 8 rawDataSize: 796 -explain select * from loc_orc +POSTHOOK: query: explain select * from loc_orc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -115,28 +73,22 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 796 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- partial column stats -analyze 
table loc_orc compute statistics for columns state +PREHOOK: query: analyze table loc_orc compute statistics for columns state PREHOOK: type: QUERY PREHOOK: Input: default@loc_orc #### A masked pattern was here #### -POSTHOOK: query: -- partial column stats -analyze table loc_orc compute statistics for columns state +POSTHOOK: query: analyze table loc_orc compute statistics for columns state POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- inner group by: map - numRows: 8 reduce - numRows: 4 --- outer group by: map - numRows: 4 reduce numRows: 2 -explain select a, c, min(b) +PREHOOK: query: explain select a, c, min(b) from ( select state as a, locid as b, count(*) as c from loc_orc group by state,locid ) sq1 group by a,c PREHOOK: type: QUERY -POSTHOOK: query: -- inner group by: map - numRows: 8 reduce - numRows: 4 --- outer group by: map - numRows: 4 reduce numRows: 2 -explain select a, c, min(b) +POSTHOOK: query: explain select a, c, min(b) from ( select state as a, locid as b, count(*) as c from loc_orc group by state,locid @@ -230,13 +182,9 @@ POSTHOOK: query: analyze table loc_orc compute statistics for columns state,loci POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -283,13 +231,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: 
query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 8 -explain select state,locid from loc_orc group by state,locid +PREHOOK: query: explain select state,locid from loc_orc group by state,locid PREHOOK: type: QUERY -POSTHOOK: query: -- Case 5: column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 9: column stats, NO grouping sets - caridnality = 8 -explain select state,locid from loc_orc group by state,locid +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -336,13 +280,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -393,13 +333,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 --- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc group by state,locid with rollup +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 --- Case 8: column stats, grouping sets - 
cardinality = 24 -explain select state,locid from loc_orc group by state,locid with rollup +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -503,13 +439,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8 --- Case 8: column stats, grouping sets - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state)) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 8 --- Case 8: column stats, grouping sets - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state)) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -560,13 +492,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16 --- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 16 --- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -617,13 +545,9 @@ STAGE PLANS: Processor Tree: ListSink 
-PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 --- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 24 --- Case 8: column stats, grouping sets - cardinality = 24 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -674,13 +598,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 6: column stats, NO hash aggregation, grouping sets - cardinality = 32 --- Case 8: column stats, grouping sets - cardinality = 32 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -731,17 +651,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map-side parallelism will be 10 - --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 --- Case 9: column stats, NO grouping sets - caridnality = 2 
-explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- map-side parallelism will be 10 - --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -788,13 +700,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16 --- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 4: column stats, hash aggregation, grouping sets - cardinality = 16 --- Case 8: column stats, grouping sets - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -845,15 +753,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- ndvProduct becomes 0 as zip does not have column stats --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select state,zip from loc_orc group by state,zip +PREHOOK: query: explain select state,zip from loc_orc group by state,zip PREHOOK: type: QUERY -POSTHOOK: query: -- ndvProduct becomes 0 as zip does not have column stats --- Case 3: column stats, hash aggregation, NO grouping sets - cardinality = 4 --- Case 9: column stats, NO grouping sets - caridnality = 2 -explain select state,zip from loc_orc group by state,zip +POSTHOOK: query: 
explain select state,zip from loc_orc group by state,zip POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -900,13 +802,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -957,13 +855,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 --- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid with rollup +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 --- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid with rollup +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with rollup POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1067,13 +961,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 7: NO column stats - cardinality = 4 -explain select state,locid from loc_orc group by state,locid grouping sets((state)) +PREHOOK: query: explain select state,locid 
from loc_orc group by state,locid grouping sets((state)) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 7: NO column stats - cardinality = 4 -explain select state,locid from loc_orc group by state,locid grouping sets((state)) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1124,13 +1014,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16 --- Case 7: NO column stats - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 16 --- Case 7: NO column stats - cardinality = 8 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid)) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1181,13 +1067,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 --- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 24 --- Case 7: NO column stats - cardinality = 12 -explain select state,locid from loc_orc group by state,locid grouping 
sets((state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1238,13 +1120,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +PREHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid grouping sets((state,locid),(state),(locid),()) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1295,13 +1173,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 7: NO column stats - cardinality = 4 -explain select year from loc_orc group by year +PREHOOK: query: explain select year from loc_orc group by year PREHOOK: type: QUERY -POSTHOOK: query: -- Case 1: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 8 --- Case 7: NO column stats - cardinality = 4 -explain select year from loc_orc group by year +POSTHOOK: query: explain select year from loc_orc group by year POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1348,13 +1222,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 
16 -explain select state,locid from loc_orc group by state,locid with cube +PREHOOK: query: explain select state,locid from loc_orc group by state,locid with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Case 2: NO column stats, NO hash aggregation, NO grouping sets - cardinality = 32 --- Case 7: NO column stats - cardinality = 16 -explain select state,locid from loc_orc group by state,locid with cube +POSTHOOK: query: explain select state,locid from loc_orc group by state,locid with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out index 988a27d..fe4bc4f 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 @@ -74,14 +70,12 @@ POSTHOOK: query: analyze table tbl2 compute statistics for columns POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### -PREHOOK: query: -- The join is being performed as part of sub-query. 
It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -215,8 +209,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select key, count(*) from @@ -226,8 +219,7 @@ select count(*) from group by key ) subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select key, count(*) from @@ -358,9 +350,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 6 -PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -377,9 +367,7 @@ join ) src2 on src1.key = src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. 
-explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -608,18 +596,14 @@ POSTHOOK: Input: default@tbl2 5 9 9 8 1 1 9 1 1 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -695,9 +679,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -709,9 +691,7 @@ select count(*) from join tbl2 b on subq2.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -802,9 +782,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. 
--- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -823,9 +801,7 @@ select count(*) from ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -935,20 +911,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. 
-explain +POSTHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join @@ -1024,18 +994,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed -explain +PREHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed -explain +POSTHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join @@ -1217,16 +1183,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- One of the tables is a sub-query and the other is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is a sub-query and the other is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -1296,9 +1258,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
--- It should be converted to to a sort-merge join -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1308,9 +1268,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1397,9 +1355,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 56 -PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1412,9 +1368,7 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1508,13 +1462,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The join is followed by a multi-table insert. It should be converted to --- a sort-merge join -explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +PREHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is followed by a multi-table insert. 
It should be converted to --- a sort-merge join -explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +POSTHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1589,13 +1539,9 @@ POSTHOOK: Input: default@tbl2 5 val_5 val_5 8 val_8 val_8 9 val_9 val_9 -PREHOOK: query: -- The join is followed by a multi-table insert, and one of the inserts involves a reducer. --- It should be converted to a sort-merge join -explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +PREHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is followed by a multi-table insert, and one of the inserts involves a reducer. --- It should be converted to a sort-merge join -explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key +POSTHOOK: query: explain select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out index 432a75a..1c7b652 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_cross_product_check_2.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@A -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +POSTHOOK: query: create table A as select * from src 
POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out index 8592d6c..d4d70bc 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_gby2_map_multi_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 @@ -120,17 +116,13 @@ POSTHOOK: Input: default@dest1 7 6 7735.0 447 10 8 8 8762.0 595 10 9 7 91047.0 577 12 -PREHOOK: query: -- HIVE-5560 when group by key is used in distinct funtion, invalid result are returned - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-5560 when group by key is used in distinct funtion, invalid result are returned - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) diff --git 
a/ql/src/test/results/clientpositive/cbo_rp_gby_empty.q.out b/ql/src/test/results/clientpositive/cbo_rp_gby_empty.q.out index 68f0255..6970fd2 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_gby_empty.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_gby_empty.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +PREHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +POSTHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/cbo_rp_insert.q.out b/ql/src/test/results/clientpositive/cbo_rp_insert.q.out index 05c87d4..fde7915 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_insert.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_insert.q.out @@ -38,13 +38,11 @@ POSTHOOK: type: QUERY POSTHOOK: Output: x314@source POSTHOOK: Lineage: source.s1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] POSTHOOK: Lineage: source.s2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] -PREHOOK: query: -- expect source to contain 1 row (1,2) -select * from source +PREHOOK: query: select * from source PREHOOK: type: QUERY PREHOOK: Input: x314@source #### A masked pattern was here #### -POSTHOOK: query: -- expect source to contain 1 row (1,2) -select * from source +POSTHOOK: query: select * from source POSTHOOK: type: QUERY 
POSTHOOK: Input: x314@source #### A masked pattern was here #### @@ -60,13 +58,11 @@ POSTHOOK: Output: x314@target1 POSTHOOK: Lineage: target1.x SIMPLE [(source)source.FieldSchema(name:s2, type:int, comment:null), ] POSTHOOK: Lineage: target1.y SIMPLE [] POSTHOOK: Lineage: target1.z SIMPLE [(source)source.FieldSchema(name:s1, type:int, comment:null), ] -PREHOOK: query: -- expect target1 to contain 1 row (2,NULL,1) -select * from target1 +PREHOOK: query: select * from target1 PREHOOK: type: QUERY PREHOOK: Input: x314@target1 #### A masked pattern was here #### -POSTHOOK: query: -- expect target1 to contain 1 row (2,NULL,1) -select * from target1 +POSTHOOK: query: select * from target1 POSTHOOK: type: QUERY POSTHOOK: Input: x314@target1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/cbo_rp_join0.q.out b/ql/src/test/results/clientpositive/cbo_rp_join0.q.out index 149383a..785cf86 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_join0.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_join0.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- Merge join into multijoin operator 1 -explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join +PREHOOK: query: explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3) cbo_t3 on cbo_t1.key=a PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- Merge join into multijoin operator 1 -explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join +POSTHOOK: query: explain select key, cbo_t1.c_int, cbo_t2.p, q from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select key as a, c_int as b, cbo_t3.c_float as c from cbo_t3) cbo_t3 on cbo_t1.key=a POSTHOOK: type: QUERY @@ -647,14 +643,12 @@ NULL NULL NULL NULL 
NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- Merge join into multijoin operator 2 -explain select key, c_int, cbo_t2.p, cbo_t2.q, cbo_t3.x, cbo_t4.b from cbo_t1 join +PREHOOK: query: explain select key, c_int, cbo_t2.p, cbo_t2.q, cbo_t3.x, cbo_t4.b from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select cbo_t3.key as x, cbo_t3.c_int as y, c_float as z from cbo_t3) cbo_t3 on cbo_t1.key=x left outer join (select key as a, c_int as b, c_float as c from cbo_t1) cbo_t4 on cbo_t1.key=a PREHOOK: type: QUERY -POSTHOOK: query: -- Merge join into multijoin operator 2 -explain select key, c_int, cbo_t2.p, cbo_t2.q, cbo_t3.x, cbo_t4.b from cbo_t1 join +POSTHOOK: query: explain select key, c_int, cbo_t2.p, cbo_t2.q, cbo_t3.x, cbo_t4.b from cbo_t1 join (select cbo_t2.key as p, cbo_t2.c_int as q, c_float as r from cbo_t2) cbo_t2 on cbo_t1.key=p right outer join (select cbo_t3.key as x, cbo_t3.c_int as y, c_float as z from cbo_t3) cbo_t3 on cbo_t1.key=x left outer join (select key as a, c_int as b, c_float as c from cbo_t1) cbo_t4 on cbo_t1.key=a diff --git a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out index 200b8ee..44f2994 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_outer_join_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM src a FULL OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN EXTENDED SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM src a FULL OUTER JOIN diff --git a/ql/src/test/results/clientpositive/cbo_rp_simple_select.q.out 
b/ql/src/test/results/clientpositive/cbo_rp_simple_select.q.out index d161d9f..2e06e61 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_simple_select.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_simple_select.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 1. Test Select + TS -select * from cbo_t1 +PREHOOK: query: select * from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 1. Test Select + TS -select * from cbo_t1 +POSTHOOK: query: select * from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -130,14 +128,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -PREHOOK: query: -- 2. Test Select + TS + FIL -select * from cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: query: select * from cbo_t1 where cbo_t1.c_int >= 0 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 2. 
Test Select + TS + FIL -select * from cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: query: select * from cbo_t1 where cbo_t1.c_int >= 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -244,14 +240,12 @@ POSTHOOK: Input: default@cbo_t1@dt=2014 1 1 25.0 1 1 25.0 1 1 25.0 -PREHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -694,13 +688,11 @@ POSTHOOK: Input: default@cbo_t1@dt=2014 2.0 1 25.0 2.0 1 25.0 2.0 1 25.0 -PREHOOK: query: -- 13. null expr in select list -select null from cbo_t3 +PREHOOK: query: select null from cbo_t3 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t3 #### A masked pattern was here #### -POSTHOOK: query: -- 13. null expr in select list -select null from cbo_t3 +POSTHOOK: query: select null from cbo_t3 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t3 #### A masked pattern was here #### @@ -724,28 +716,24 @@ NULL NULL NULL NULL -PREHOOK: query: -- 14. unary operator -select key from cbo_t1 where c_int = -6 or c_int = +6 +PREHOOK: query: select key from cbo_t1 where c_int = -6 or c_int = +6 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 14. 
unary operator -select key from cbo_t1 where c_int = -6 or c_int = +6 +POSTHOOK: query: select key from cbo_t1 where c_int = -6 or c_int = +6 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -PREHOOK: query: -- 15. query referencing only partition columns -select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +PREHOOK: query: select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 15. query referencing only partition columns -select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +POSTHOOK: query: select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/cbo_rp_stats.q.out b/ql/src/test/results/clientpositive/cbo_rp_stats.q.out index 554a8f0..3747d31 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_stats.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_stats.q.out @@ -1,12 +1,10 @@ -PREHOOK: query: -- 20. Test get stats with empty partition list -select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +PREHOOK: query: select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 20. 
Test get stats with empty partition list -select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +POSTHOOK: query: select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t2 diff --git a/ql/src/test/results/clientpositive/cbo_rp_subq_exists.q.out b/ql/src/test/results/clientpositive/cbo_rp_subq_exists.q.out index 50bfbe2..45f4524 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_subq_exists.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_subq_exists.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * +PREHOOK: query: select * from src_cbo b where not exists (select distinct a.key @@ -10,9 +8,7 @@ where not exists PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * +POSTHOOK: query: select * from src_cbo b where not exists (select distinct a.key @@ -141,8 +137,7 @@ POSTHOOK: Input: default@src_cbo 199 val_199 199 val_199 2 val_2 -PREHOOK: query: -- no agg, corr, having -select * +PREHOOK: query: select * from src_cbo b group by key, value having not exists @@ -153,8 +148,7 @@ having not exists PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- no agg, corr, having -select * +POSTHOOK: query: select * from src_cbo b group by key, value having not exists @@ -179,9 +173,7 @@ POSTHOOK: Input: default@src_cbo 118 val_118 119 val_119 12 val_12 -PREHOOK: query: -- 19. 
SubQueries Exists --- view test -create view cv1 as +PREHOOK: query: create view cv1 as select * from src_cbo b where exists @@ -192,9 +184,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src_cbo PREHOOK: Output: database:default PREHOOK: Output: default@cv1 -POSTHOOK: query: -- 19. SubQueries Exists --- view test -create view cv1 as +POSTHOOK: query: create view cv1 as select * from src_cbo b where exists @@ -226,8 +216,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- sq in from -select * +PREHOOK: query: select * from (select * from src_cbo b where exists @@ -238,8 +227,7 @@ from (select * PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- sq in from -select * +POSTHOOK: query: select * from (select * from src_cbo b where exists @@ -261,8 +249,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- sq in from, having -select * +PREHOOK: query: select * from (select b.key, count(*) from src_cbo b group by b.key @@ -275,8 +262,7 @@ from (select b.key, count(*) PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- sq in from, having -select * +POSTHOOK: query: select * from (select b.key, count(*) from src_cbo b group by b.key diff --git a/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out b/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out index 289e3e7..12d9c10 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_udaf_percentile_approx_23.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- 0.23 changed input order of data in reducer task, which affects result of percentile_approx - -CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: 
CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- 0.23 changed input order of data in reducer task, which affects result of percentile_approx - -CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket @@ -140,8 +134,7 @@ POSTHOOK: query: create table t12 (result array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t12 -PREHOOK: query: -- disable map-side aggregation -FROM bucket +PREHOOK: query: FROM bucket insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) @@ -171,8 +164,7 @@ PREHOOK: Output: default@t6 PREHOOK: Output: default@t7 PREHOOK: Output: default@t8 PREHOOK: Output: default@t9 -POSTHOOK: query: -- disable map-side aggregation -FROM bucket +POSTHOOK: query: FROM bucket insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) @@ -322,8 +314,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t12 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: -- enable map-side aggregation -FROM bucket +PREHOOK: query: FROM bucket insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) insert 
overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) @@ -353,8 +344,7 @@ PREHOOK: Output: default@t6 PREHOOK: Output: default@t7 PREHOOK: Output: default@t8 PREHOOK: Output: default@t9 -POSTHOOK: query: -- enable map-side aggregation -FROM bucket +POSTHOOK: query: FROM bucket insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) @@ -504,12 +494,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t12 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: -- NaN -explain +PREHOOK: query: explain select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket PREHOOK: type: QUERY -POSTHOOK: query: -- NaN -explain +POSTHOOK: query: explain select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -565,12 +553,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket #### A masked pattern was here #### true -PREHOOK: query: -- with CBO -explain +PREHOOK: query: explain select percentile_approx(key, 0.5) from bucket PREHOOK: type: QUERY -POSTHOOK: query: -- with CBO -explain +POSTHOOK: query: explain select percentile_approx(key, 0.5) from bucket POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/cbo_rp_udf_percentile.q.out b/ql/src/test/results/clientpositive/cbo_rp_udf_percentile.q.out index 3f8890b..e3033e7 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_udf_percentile.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_udf_percentile.q.out @@ -10,9 +10,7 @@ POSTHOOK: type: DESCFUNCTION percentile(expr, pc) - Returns the percentile(s) of expr at pc (range: [0,1]).pc can be a double 
or double array Function class:org.apache.hadoop.hive.ql.udf.UDAFPercentile Function type:BUILTIN -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(CAST(substr(value, 5) AS INT), 0.0), percentile(CAST(substr(value, 5) AS INT), 0.5), percentile(CAST(substr(value, 5) AS INT), 1.0), @@ -22,9 +20,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(CAST(substr(value, 5) AS INT), 0.0), percentile(CAST(substr(value, 5) AS INT), 0.5), percentile(CAST(substr(value, 5) AS INT), 1.0), @@ -294,8 +290,7 @@ POSTHOOK: Input: default@src 7 70.0 73.0 78.0 [70.0,73.0,77.91000000000001,78.0] 8 80.0 84.0 87.0 [80.0,84.0,86.92,87.0] 9 90.0 95.0 98.0 [90.0,95.0,98.0,98.0] -PREHOOK: query: -- test null handling -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(NULL, 0.0), percentile(NULL, array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -303,8 +298,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test null handling -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(NULL, 0.0), percentile(NULL, array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -362,8 +356,7 @@ POSTHOOK: Input: default@src 7 NULL NULL 8 NULL NULL 9 NULL NULL -PREHOOK: query: -- test empty array handling -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5), percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -371,8 +364,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: 
query: -- test empty array handling -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5), percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -439,13 +431,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### NULL -PREHOOK: query: -- test where percentile list is empty -select percentile(cast(key as bigint), array()) from src where false +PREHOOK: query: select percentile(cast(key as bigint), array()) from src where false PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test where percentile list is empty -select percentile(cast(key as bigint), array()) from src where false +POSTHOOK: query: select percentile(cast(key as bigint), array()) from src where false POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/cbo_rp_udf_percentile2.q.out b/ql/src/test/results/clientpositive/cbo_rp_udf_percentile2.q.out index 66d4f16..769f51f 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_udf_percentile2.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_udf_percentile2.q.out @@ -10,9 +10,7 @@ POSTHOOK: type: DESCFUNCTION percentile(expr, pc) - Returns the percentile(s) of expr at pc (range: [0,1]).pc can be a double or double array Function class:org.apache.hadoop.hive.ql.udf.UDAFPercentile Function type:BUILTIN -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, count(distinct(value)), percentile(CAST(substr(value, 5) AS INT), 0.0), percentile(CAST(substr(value, 5) AS INT), 0.5), @@ -23,9 +21,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: 
query: SELECT CAST(key AS INT) DIV 10, count(distinct(value)), percentile(CAST(substr(value, 5) AS INT), 0.0), percentile(CAST(substr(value, 5) AS INT), 0.5), diff --git a/ql/src/test/results/clientpositive/cbo_rp_union.q.out b/ql/src/test/results/clientpositive/cbo_rp_union.q.out index fb86d22..f6f36f6 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_union.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_union.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 11. Union All -select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b +PREHOOK: query: select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 11. Union All -select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b +POSTHOOK: query: select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/cbo_rp_windowing.q.out b/ql/src/test/results/clientpositive/cbo_rp_windowing.q.out index 52b584a..f1913d7 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_windowing.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_windowing.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- 9. 
Test Windowing Functions --- SORT_QUERY_RESULTS - -select count(c_int) over() from cbo_t1 +PREHOOK: query: select count(c_int) over() from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 9. Test Windowing Functions --- SORT_QUERY_RESULTS - -select count(c_int) over() from cbo_t1 +POSTHOOK: query: select count(c_int) over() from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/cbo_simple_select.q.out b/ql/src/test/results/clientpositive/cbo_simple_select.q.out index d161d9f..2e06e61 100644 --- a/ql/src/test/results/clientpositive/cbo_simple_select.q.out +++ b/ql/src/test/results/clientpositive/cbo_simple_select.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 1. Test Select + TS -select * from cbo_t1 +PREHOOK: query: select * from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 1. Test Select + TS -select * from cbo_t1 +POSTHOOK: query: select * from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -130,14 +128,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -PREHOOK: query: -- 2. Test Select + TS + FIL -select * from cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: query: select * from cbo_t1 where cbo_t1.c_int >= 0 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 2. 
Test Select + TS + FIL -select * from cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: query: select * from cbo_t1 where cbo_t1.c_int >= 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -244,14 +240,12 @@ POSTHOOK: Input: default@cbo_t1@dt=2014 1 1 25.0 1 1 25.0 1 1 25.0 -PREHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -694,13 +688,11 @@ POSTHOOK: Input: default@cbo_t1@dt=2014 2.0 1 25.0 2.0 1 25.0 2.0 1 25.0 -PREHOOK: query: -- 13. null expr in select list -select null from cbo_t3 +PREHOOK: query: select null from cbo_t3 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t3 #### A masked pattern was here #### -POSTHOOK: query: -- 13. null expr in select list -select null from cbo_t3 +POSTHOOK: query: select null from cbo_t3 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t3 #### A masked pattern was here #### @@ -724,28 +716,24 @@ NULL NULL NULL NULL -PREHOOK: query: -- 14. unary operator -select key from cbo_t1 where c_int = -6 or c_int = +6 +PREHOOK: query: select key from cbo_t1 where c_int = -6 or c_int = +6 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 14. 
unary operator -select key from cbo_t1 where c_int = -6 or c_int = +6 +POSTHOOK: query: select key from cbo_t1 where c_int = -6 or c_int = +6 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -PREHOOK: query: -- 15. query referencing only partition columns -select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +PREHOOK: query: select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 15. query referencing only partition columns -select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +POSTHOOK: query: select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/cbo_stats.q.out b/ql/src/test/results/clientpositive/cbo_stats.q.out index 554a8f0..3747d31 100644 --- a/ql/src/test/results/clientpositive/cbo_stats.q.out +++ b/ql/src/test/results/clientpositive/cbo_stats.q.out @@ -1,12 +1,10 @@ -PREHOOK: query: -- 20. Test get stats with empty partition list -select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +PREHOOK: query: select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 20. 
Test get stats with empty partition list -select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +POSTHOOK: query: select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t2 diff --git a/ql/src/test/results/clientpositive/cbo_subq_exists.q.out b/ql/src/test/results/clientpositive/cbo_subq_exists.q.out index 50bfbe2..45f4524 100644 --- a/ql/src/test/results/clientpositive/cbo_subq_exists.q.out +++ b/ql/src/test/results/clientpositive/cbo_subq_exists.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * +PREHOOK: query: select * from src_cbo b where not exists (select distinct a.key @@ -10,9 +8,7 @@ where not exists PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * +POSTHOOK: query: select * from src_cbo b where not exists (select distinct a.key @@ -141,8 +137,7 @@ POSTHOOK: Input: default@src_cbo 199 val_199 199 val_199 2 val_2 -PREHOOK: query: -- no agg, corr, having -select * +PREHOOK: query: select * from src_cbo b group by key, value having not exists @@ -153,8 +148,7 @@ having not exists PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- no agg, corr, having -select * +POSTHOOK: query: select * from src_cbo b group by key, value having not exists @@ -179,9 +173,7 @@ POSTHOOK: Input: default@src_cbo 118 val_118 119 val_119 12 val_12 -PREHOOK: query: -- 19. 
SubQueries Exists --- view test -create view cv1 as +PREHOOK: query: create view cv1 as select * from src_cbo b where exists @@ -192,9 +184,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src_cbo PREHOOK: Output: database:default PREHOOK: Output: default@cv1 -POSTHOOK: query: -- 19. SubQueries Exists --- view test -create view cv1 as +POSTHOOK: query: create view cv1 as select * from src_cbo b where exists @@ -226,8 +216,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- sq in from -select * +PREHOOK: query: select * from (select * from src_cbo b where exists @@ -238,8 +227,7 @@ from (select * PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- sq in from -select * +POSTHOOK: query: select * from (select * from src_cbo b where exists @@ -261,8 +249,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- sq in from, having -select * +PREHOOK: query: select * from (select b.key, count(*) from src_cbo b group by b.key @@ -275,8 +262,7 @@ from (select b.key, count(*) PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- sq in from, having -select * +POSTHOOK: query: select * from (select b.key, count(*) from src_cbo b group by b.key diff --git a/ql/src/test/results/clientpositive/cbo_union.q.out b/ql/src/test/results/clientpositive/cbo_union.q.out index fb86d22..f6f36f6 100644 --- a/ql/src/test/results/clientpositive/cbo_union.q.out +++ b/ql/src/test/results/clientpositive/cbo_union.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 11. 
Union All -select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b +PREHOOK: query: select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 11. Union All -select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b +POSTHOOK: query: select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/cbo_windowing.q.out b/ql/src/test/results/clientpositive/cbo_windowing.q.out index 52b584a..f1913d7 100644 --- a/ql/src/test/results/clientpositive/cbo_windowing.q.out +++ b/ql/src/test/results/clientpositive/cbo_windowing.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- 9. Test Windowing Functions --- SORT_QUERY_RESULTS - -select count(c_int) over() from cbo_t1 +PREHOOK: query: select count(c_int) over() from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 9. 
Test Windowing Functions --- SORT_QUERY_RESULTS - -select count(c_int) over() from cbo_t1 +POSTHOOK: query: select count(c_int) over() from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/char_1.q.out b/ql/src/test/results/clientpositive/char_1.q.out index 615f029..2f155a8 100644 --- a/ql/src/test/results/clientpositive/char_1.q.out +++ b/ql/src/test/results/clientpositive/char_1.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table char1 +PREHOOK: query: drop table char1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table char1 +POSTHOOK: query: drop table char1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table char1_1 PREHOOK: type: DROPTABLE @@ -26,13 +22,11 @@ POSTHOOK: query: create table char1_1 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@char1_1 -PREHOOK: query: -- load from file -load data local inpath '../../data/files/srcbucket0.txt' overwrite into table char1 +PREHOOK: query: load data local inpath '../../data/files/srcbucket0.txt' overwrite into table char1 PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@char1 -POSTHOOK: query: -- load from file -load data local inpath '../../data/files/srcbucket0.txt' overwrite into table char1 +POSTHOOK: query: load data local inpath '../../data/files/srcbucket0.txt' overwrite into table char1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@char1 @@ -46,14 +40,12 @@ POSTHOOK: Input: default@char1 #### A masked pattern was here #### 0 val_0 0 val_0 -PREHOOK: query: -- insert overwrite, from same/different length char -insert overwrite table char1 +PREHOOK: query: insert overwrite table char1 select cast(key as char(10)), cast(value as char(15)) from src order by key, value limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src 
PREHOOK: Output: default@char1 -POSTHOOK: query: -- insert overwrite, from same/different length char -insert overwrite table char1 +POSTHOOK: query: insert overwrite table char1 select cast(key as char(10)), cast(value as char(15)) from src order by key, value limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -70,14 +62,12 @@ POSTHOOK: Input: default@char1 #### A masked pattern was here #### 0 val_0 0 val_0 -PREHOOK: query: -- insert overwrite, from string -insert overwrite table char1 +PREHOOK: query: insert overwrite table char1 select key, value from src order by key, value limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@char1 -POSTHOOK: query: -- insert overwrite, from string -insert overwrite table char1 +POSTHOOK: query: insert overwrite table char1 select key, value from src order by key, value limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -94,14 +84,12 @@ POSTHOOK: Input: default@char1 #### A masked pattern was here #### 0 val_0 0 val_0 -PREHOOK: query: -- insert string from char -insert overwrite table char1_1 +PREHOOK: query: insert overwrite table char1_1 select key, value from char1 order by key, value limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@char1 PREHOOK: Output: default@char1_1 -POSTHOOK: query: -- insert string from char -insert overwrite table char1_1 +POSTHOOK: query: insert overwrite table char1_1 select key, value from char1 order by key, value limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@char1 @@ -118,14 +106,12 @@ POSTHOOK: Input: default@char1_1 #### A masked pattern was here #### 0 val_0 0 val_0 -PREHOOK: query: -- respect string length -insert overwrite table char1 +PREHOOK: query: insert overwrite table char1 select key, cast(value as char(3)) from src order by key, value limit 2 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@char1 -POSTHOOK: query: -- respect string length -insert overwrite table char1 +POSTHOOK: query: insert 
overwrite table char1 select key, cast(value as char(3)) from src order by key, value limit 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/char_2.q.out b/ql/src/test/results/clientpositive/char_2.q.out index 801a3f1..9b994e6 100644 --- a/ql/src/test/results/clientpositive/char_2.q.out +++ b/ql/src/test/results/clientpositive/char_2.q.out @@ -47,8 +47,7 @@ val_10 10 1 val_100 200 2 val_103 206 2 val_104 208 2 -PREHOOK: query: -- should match the query from src -select value, sum(cast(key as int)), count(*) numrows +PREHOOK: query: select value, sum(cast(key as int)), count(*) numrows from char_2 group by value order by value asc @@ -56,8 +55,7 @@ limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@char_2 #### A masked pattern was here #### -POSTHOOK: query: -- should match the query from src -select value, sum(cast(key as int)), count(*) numrows +POSTHOOK: query: select value, sum(cast(key as int)), count(*) numrows from char_2 group by value order by value asc @@ -91,8 +89,7 @@ val_97 194 2 val_96 96 1 val_95 190 2 val_92 92 1 -PREHOOK: query: -- should match the query from src -select value, sum(cast(key as int)), count(*) numrows +PREHOOK: query: select value, sum(cast(key as int)), count(*) numrows from char_2 group by value order by value desc @@ -100,8 +97,7 @@ limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@char_2 #### A masked pattern was here #### -POSTHOOK: query: -- should match the query from src -select value, sum(cast(key as int)), count(*) numrows +POSTHOOK: query: select value, sum(cast(key as int)), count(*) numrows from char_2 group by value order by value desc diff --git a/ql/src/test/results/clientpositive/char_cast.q.out b/ql/src/test/results/clientpositive/char_cast.q.out index 025fedb..9b337af 100644 --- a/ql/src/test/results/clientpositive/char_cast.q.out +++ b/ql/src/test/results/clientpositive/char_cast.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Cast from char to other data types 
-select +PREHOOK: query: select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), cast(cast('11' as string) as int), @@ -11,8 +10,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Cast from char to other data types -select +POSTHOOK: query: select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), cast(cast('11' as string) as int), @@ -80,8 +78,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 2011-01-01 2011-01-01 01:02:03 -PREHOOK: query: -- no tests from string/char to boolean, that conversion doesn't look useful -select +PREHOOK: query: select cast(cast('abc123' as string) as string), cast(cast('abc123' as string) as varchar(10)), cast(cast('abc123' as string) as char(10)) @@ -89,8 +86,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- no tests from string/char to boolean, that conversion doesn't look useful -select +POSTHOOK: query: select cast(cast('abc123' as string) as string), cast(cast('abc123' as string) as varchar(10)), cast(cast('abc123' as string) as char(10)) @@ -133,8 +129,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### abc123 abc123 abc123 -PREHOOK: query: -- cast from other types to char -select +PREHOOK: query: select cast(cast(11 as tinyint) as string), cast(cast(11 as smallint) as string), cast(cast(11 as int) as string), @@ -146,8 +141,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- cast from other types to char -select +POSTHOOK: query: select cast(cast(11 as tinyint) as string), cast(cast(11 as smallint) as string), cast(cast(11 as int) as string), diff --git a/ql/src/test/results/clientpositive/char_comparison.q.out 
b/ql/src/test/results/clientpositive/char_comparison.q.out index 9493c99..bc96ff5 100644 --- a/ql/src/test/results/clientpositive/char_comparison.q.out +++ b/ql/src/test/results/clientpositive/char_comparison.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Should all be true -select +PREHOOK: query: select cast('abc' as char(10)) = cast('abc' as char(10)), cast('abc' as char(10)) <= cast('abc' as char(10)), cast('abc' as char(10)) >= cast('abc' as char(10)), @@ -10,8 +9,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Should all be true -select +POSTHOOK: query: select cast('abc' as char(10)) = cast('abc' as char(10)), cast('abc' as char(10)) <= cast('abc' as char(10)), cast('abc' as char(10)) >= cast('abc' as char(10)), @@ -23,8 +21,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true true true true true true -PREHOOK: query: -- Different char lengths should still compare the same -select +PREHOOK: query: select cast('abc' as char(10)) = cast('abc' as char(3)), cast('abc' as char(10)) <= cast('abc' as char(3)), cast('abc' as char(10)) >= cast('abc' as char(3)), @@ -35,8 +32,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Different char lengths should still compare the same -select +POSTHOOK: query: select cast('abc' as char(10)) = cast('abc' as char(3)), cast('abc' as char(10)) <= cast('abc' as char(3)), cast('abc' as char(10)) >= cast('abc' as char(3)), @@ -48,8 +44,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true true true true true true -PREHOOK: query: -- Should work with string types as well -select +PREHOOK: query: select cast('abc' as char(10)) = 'abc', cast('abc' as char(10)) <= 'abc', cast('abc' as char(10)) >= 'abc', @@ -60,8 +55,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked 
pattern was here #### -POSTHOOK: query: -- Should work with string types as well -select +POSTHOOK: query: select cast('abc' as char(10)) = 'abc', cast('abc' as char(10)) <= 'abc', cast('abc' as char(10)) >= 'abc', @@ -73,30 +67,26 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true true true true true true -PREHOOK: query: -- leading space is significant for char -select +PREHOOK: query: select cast(' abc' as char(10)) <> cast('abc' as char(10)) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- leading space is significant for char -select +POSTHOOK: query: select cast(' abc' as char(10)) <> cast('abc' as char(10)) from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true -PREHOOK: query: -- trailing space is not significant for char -select +PREHOOK: query: select cast('abc ' as char(10)) = cast('abc' as char(10)) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- trailing space is not significant for char -select +POSTHOOK: query: select cast('abc ' as char(10)) = cast('abc' as char(10)) from src limit 1 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/char_join1.q.out b/ql/src/test/results/clientpositive/char_join1.q.out index 48e6ad8..0e35019 100644 --- a/ql/src/test/results/clientpositive/char_join1.q.out +++ b/ql/src/test/results/clientpositive/char_join1.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table char_join1_ch1 +PREHOOK: query: drop table char_join1_ch1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table char_join1_ch1 +POSTHOOK: query: drop table char_join1_ch1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table char_join1_ch2 PREHOOK: type: DROPTABLE @@ -80,13 +76,11 @@ POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into 
table ch POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@char_join1_str -PREHOOK: query: -- Join char with same length char -select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2) +PREHOOK: query: select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2) PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_ch1 #### A masked pattern was here #### -POSTHOOK: query: -- Join char with same length char -select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2) +POSTHOOK: query: select * from char_join1_ch1 a join char_join1_ch1 b on (a.c2 = b.c2) POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_ch1 #### A masked pattern was here #### @@ -95,14 +89,12 @@ POSTHOOK: Input: default@char_join1_ch1 2 abc 1 abc 2 abc 2 abc 3 abc 3 abc -PREHOOK: query: -- Join char with different length char -select * from char_join1_ch1 a join char_join1_ch2 b on (a.c2 = b.c2) +PREHOOK: query: select * from char_join1_ch1 a join char_join1_ch2 b on (a.c2 = b.c2) PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_ch1 PREHOOK: Input: default@char_join1_ch2 #### A masked pattern was here #### -POSTHOOK: query: -- Join char with different length char -select * from char_join1_ch1 a join char_join1_ch2 b on (a.c2 = b.c2) +POSTHOOK: query: select * from char_join1_ch1 a join char_join1_ch2 b on (a.c2 = b.c2) POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_ch1 POSTHOOK: Input: default@char_join1_ch2 @@ -112,14 +104,12 @@ POSTHOOK: Input: default@char_join1_ch2 2 abc 1 abc 2 abc 2 abc 3 abc 3 abc -PREHOOK: query: -- Join char with string -select * from char_join1_ch1 a join char_join1_str b on (a.c2 = b.c2) +PREHOOK: query: select * from char_join1_ch1 a join char_join1_str b on (a.c2 = b.c2) PREHOOK: type: QUERY PREHOOK: Input: default@char_join1_ch1 PREHOOK: Input: default@char_join1_str #### A masked pattern was here #### -POSTHOOK: query: -- Join char with string -select * from char_join1_ch1 a 
join char_join1_str b on (a.c2 = b.c2) +POSTHOOK: query: select * from char_join1_ch1 a join char_join1_str b on (a.c2 = b.c2) POSTHOOK: type: QUERY POSTHOOK: Input: default@char_join1_ch1 POSTHOOK: Input: default@char_join1_str diff --git a/ql/src/test/results/clientpositive/char_nested_types.q.out b/ql/src/test/results/clientpositive/char_nested_types.q.out index f989132..84e4a0b 100644 --- a/ql/src/test/results/clientpositive/char_nested_types.q.out +++ b/ql/src/test/results/clientpositive/char_nested_types.q.out @@ -42,13 +42,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@char_nested_1 POSTHOOK: Lineage: char_nested_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: char_nested_1.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- arrays -create table char_nested_array (c1 array) +PREHOOK: query: create table char_nested_array (c1 array) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_nested_array -POSTHOOK: query: -- arrays -create table char_nested_array (c1 array) +POSTHOOK: query: create table char_nested_array (c1 array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@char_nested_array @@ -79,13 +77,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@char_nested_array #### A masked pattern was here #### ["val_0 ","val_0 "] -PREHOOK: query: -- maps -create table char_nested_map (c1 map) +PREHOOK: query: create table char_nested_map (c1 map) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_nested_map -POSTHOOK: query: -- maps -create table char_nested_map (c1 map) +POSTHOOK: query: create table char_nested_map (c1 map) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@char_nested_map @@ -116,13 +112,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@char_nested_map #### A masked 
pattern was here #### {0:"val_0 "} -PREHOOK: query: -- structs -create table char_nested_struct (c1 struct) +PREHOOK: query: create table char_nested_struct (c1 struct) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_nested_struct -POSTHOOK: query: -- structs -create table char_nested_struct (c1 struct) +POSTHOOK: query: create table char_nested_struct (c1 struct) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@char_nested_struct @@ -159,15 +153,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@char_nested_struct #### A masked pattern was here #### {"a":0,"b":"val_0 ","c":"val_0"} -PREHOOK: query: -- nested type with create table as -create table char_nested_cta as +PREHOOK: query: create table char_nested_cta as select * from char_nested_struct PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@char_nested_struct PREHOOK: Output: database:default PREHOOK: Output: default@char_nested_cta -POSTHOOK: query: -- nested type with create table as -create table char_nested_cta as +POSTHOOK: query: create table char_nested_cta as select * from char_nested_struct POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@char_nested_struct @@ -190,15 +182,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@char_nested_cta #### A masked pattern was here #### {"a":0,"b":"val_0 ","c":"val_0"} -PREHOOK: query: -- nested type with view -create table char_nested_view as +PREHOOK: query: create table char_nested_view as select * from char_nested_struct PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@char_nested_struct PREHOOK: Output: database:default PREHOOK: Output: default@char_nested_view -POSTHOOK: query: -- nested type with view -create table char_nested_view as +POSTHOOK: query: create table char_nested_view as select * from char_nested_struct POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@char_nested_struct diff --git 
a/ql/src/test/results/clientpositive/char_pad_convert.q.out b/ql/src/test/results/clientpositive/char_pad_convert.q.out index 26102e4..9e58257 100644 --- a/ql/src/test/results/clientpositive/char_pad_convert.q.out +++ b/ql/src/test/results/clientpositive/char_pad_convert.q.out @@ -40,20 +40,14 @@ POSTHOOK: query: load data local inpath '../../data/files/over1k' into table ove POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@over1k -PREHOOK: query: -- Pass non-strings for the first and third arguments to test argument conversion - --- Integers -select lpad(t, 4, ' '), +PREHOOK: query: select lpad(t, 4, ' '), lpad(si, 2, ' '), lpad(i, 9, 'z'), lpad(b, 2, 'a') from over1k limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@over1k #### A masked pattern was here #### -POSTHOOK: query: -- Pass non-strings for the first and third arguments to test argument conversion - --- Integers -select lpad(t, 4, ' '), +POSTHOOK: query: select lpad(t, 4, ' '), lpad(si, 2, ' '), lpad(i, 9, 'z'), lpad(b, 2, 'a') from over1k limit 5 @@ -84,16 +78,14 @@ POSTHOOK: Input: default@over1k 35353535oh 3873my 656196561other 429496745one 11111111oh 3723my 656566565other 429496731one 54545454oh 3173my 655476554other 429496740one -PREHOOK: query: -- Integers -select rpad(t, 4, ' '), +PREHOOK: query: select rpad(t, 4, ' '), rpad(si, 2, ' '), rpad(i, 9, 'z'), rpad(b, 2, 'a') from over1k limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@over1k #### A masked pattern was here #### -POSTHOOK: query: -- Integers -select rpad(t, 4, ' '), +POSTHOOK: query: select rpad(t, 4, ' '), rpad(si, 2, ' '), rpad(i, 9, 'z'), rpad(b, 2, 'a') from over1k limit 5 @@ -124,8 +116,7 @@ oh19191919 my4424 other655536555 one429496738 oh35353535 my3873 other656196561 one429496745 oh11111111 my3723 other656566565 one429496731 oh54545454 my3173 other655476554 one429496740 -PREHOOK: query: -- More -select lpad(f, 4, ' '), +PREHOOK: query: select lpad(f, 4, ' '), lpad(d, 2, ' '), lpad(bo, 9, 
'z'), lpad(ts, 2, 'a'), @@ -134,8 +125,7 @@ select lpad(f, 4, ' '), PREHOOK: type: QUERY PREHOOK: Input: default@over1k #### A masked pattern was here #### -POSTHOOK: query: -- More -select lpad(f, 4, ' '), +POSTHOOK: query: select lpad(f, 4, ' '), lpad(d, 2, ' '), lpad(bo, 9, 'z'), lpad(ts, 2, 'a'), diff --git a/ql/src/test/results/clientpositive/char_serde.q.out b/ql/src/test/results/clientpositive/char_serde.q.out index 8f6f8ce..03213b6 100644 --- a/ql/src/test/results/clientpositive/char_serde.q.out +++ b/ql/src/test/results/clientpositive/char_serde.q.out @@ -22,10 +22,7 @@ PREHOOK: query: drop table if exists char_serde_orc PREHOOK: type: DROPTABLE POSTHOOK: query: drop table if exists char_serde_orc POSTHOOK: type: DROPTABLE -PREHOOK: query: -- --- RegexSerDe --- -create table char_serde_regex ( +PREHOOK: query: create table char_serde_regex ( key char(15), value char(20) ) @@ -37,10 +34,7 @@ stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_serde_regex -POSTHOOK: query: -- --- RegexSerDe --- -create table char_serde_regex ( +POSTHOOK: query: create table char_serde_regex ( key char(15), value char(20) ) @@ -86,20 +80,14 @@ val_1 2 val_10 1 val_100 2 val_101 2 -PREHOOK: query: -- --- LazyBinary --- -create table char_serde_lb ( +PREHOOK: query: create table char_serde_lb ( key char(15), value char(20) ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_serde_lb -POSTHOOK: query: -- --- LazyBinary --- -create table char_serde_lb ( +POSTHOOK: query: create table char_serde_lb ( key char(15), value char(20) ) @@ -152,20 +140,14 @@ val_1 2 val_10 1 val_100 2 val_101 2 -PREHOOK: query: -- --- LazySimple --- -create table char_serde_ls ( +PREHOOK: query: create table char_serde_ls ( key char(15), value char(20) ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_serde_ls -POSTHOOK: query: -- --- LazySimple --- -create table 
char_serde_ls ( +POSTHOOK: query: create table char_serde_ls ( key char(15), value char(20) ) @@ -218,20 +200,14 @@ val_1 2 val_10 1 val_100 2 val_101 2 -PREHOOK: query: -- --- Columnar --- -create table char_serde_c ( +PREHOOK: query: create table char_serde_c ( key char(15), value char(20) ) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_serde_c -POSTHOOK: query: -- --- Columnar --- -create table char_serde_c ( +POSTHOOK: query: create table char_serde_c ( key char(15), value char(20) ) stored as rcfile @@ -284,20 +260,14 @@ val_1 2 val_10 1 val_100 2 val_101 2 -PREHOOK: query: -- --- LazyBinaryColumnar --- -create table char_serde_lbc ( +PREHOOK: query: create table char_serde_lbc ( key char(15), value char(20) ) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_serde_lbc -POSTHOOK: query: -- --- LazyBinaryColumnar --- -create table char_serde_lbc ( +POSTHOOK: query: create table char_serde_lbc ( key char(15), value char(20) ) stored as rcfile @@ -350,20 +320,14 @@ val_1 2 val_10 1 val_100 2 val_101 2 -PREHOOK: query: -- --- ORC --- -create table char_serde_orc ( +PREHOOK: query: create table char_serde_orc ( key char(15), value char(20) ) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@char_serde_orc -POSTHOOK: query: -- --- ORC --- -create table char_serde_orc ( +POSTHOOK: query: create table char_serde_orc ( key char(15), value char(20) ) stored as orc diff --git a/ql/src/test/results/clientpositive/char_udf1.q.out b/ql/src/test/results/clientpositive/char_udf1.q.out index d84237a..07ce108 100644 --- a/ql/src/test/results/clientpositive/char_udf1.q.out +++ b/ql/src/test/results/clientpositive/char_udf1.q.out @@ -24,8 +24,7 @@ POSTHOOK: Lineage: char_udf_1.c1 SIMPLE [(src)src.FieldSchema(name:key, type:str POSTHOOK: Lineage: char_udf_1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] POSTHOOK: Lineage: char_udf_1.c3 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: char_udf_1.c4 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- UDFs with char support -select +PREHOOK: query: select concat(c1, c2), concat(c3, c4), concat(c1, c2) = concat(c3, c4) @@ -33,8 +32,7 @@ from char_udf_1 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@char_udf_1 #### A masked pattern was here #### -POSTHOOK: query: -- UDFs with char support -select +POSTHOOK: query: select concat(c1, c2), concat(c3, c4), concat(c1, c2) = concat(c3, c4) @@ -77,8 +75,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@char_udf_1 #### A masked pattern was here #### val_238 val_238 true -PREHOOK: query: -- Scalar UDFs -select +PREHOOK: query: select ascii(c2), ascii(c4), ascii(c2) = ascii(c4) @@ -86,8 +83,7 @@ from char_udf_1 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@char_udf_1 #### A masked pattern was here #### -POSTHOOK: query: -- Scalar UDFs -select +POSTHOOK: query: select ascii(c2), ascii(c4), ascii(c2) = ascii(c4) @@ -215,10 +211,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@char_udf_1 #### A masked pattern was here #### val_238 val_238 true -PREHOOK: query: -- In hive wiki page https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF --- we only allow A regexp B, not regexp (A,B). - -select +PREHOOK: query: select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val') @@ -226,10 +219,7 @@ from char_udf_1 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@char_udf_1 #### A masked pattern was here #### -POSTHOOK: query: -- In hive wiki page https://cwiki.apache.org/confluence/display/Hive/LanguageManual+UDF --- we only allow A regexp B, not regexp (A,B). 
- -select +POSTHOOK: query: select c2 regexp 'val', c4 regexp 'val', (c2 regexp 'val') = (c4 regexp 'val') @@ -402,16 +392,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@char_udf_1 #### A masked pattern was here #### val_238 val_238 true -PREHOOK: query: -- Aggregate Functions -select +PREHOOK: query: select compute_stats(c2, 16), compute_stats(c4, 16) from char_udf_1 PREHOOK: type: QUERY PREHOOK: Input: default@char_udf_1 #### A masked pattern was here #### -POSTHOOK: query: -- Aggregate Functions -select +POSTHOOK: query: select compute_stats(c2, 16), compute_stats(c4, 16) from char_udf_1 diff --git a/ql/src/test/results/clientpositive/char_union1.q.out b/ql/src/test/results/clientpositive/char_union1.q.out index 16fffb6..4048882 100644 --- a/ql/src/test/results/clientpositive/char_union1.q.out +++ b/ql/src/test/results/clientpositive/char_union1.q.out @@ -76,8 +76,7 @@ POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table ch POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@char_union1_str -PREHOOK: query: -- union char with same length char -select * from ( +PREHOOK: query: select * from ( select * from char_union1_ch1 union all select * from char_union1_ch1 limit 1 @@ -85,8 +84,7 @@ select * from ( PREHOOK: type: QUERY PREHOOK: Input: default@char_union1_ch1 #### A masked pattern was here #### -POSTHOOK: query: -- union char with same length char -select * from ( +POSTHOOK: query: select * from ( select * from char_union1_ch1 union all select * from char_union1_ch1 limit 1 @@ -95,8 +93,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@char_union1_ch1 #### A masked pattern was here #### 1 abc -PREHOOK: query: -- union char with different length char -select * from ( +PREHOOK: query: select * from ( select * from char_union1_ch1 union all select * from char_union1_ch2 limit 1 @@ -105,8 +102,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@char_union1_ch1 PREHOOK: Input: default@char_union1_ch2 
#### A masked pattern was here #### -POSTHOOK: query: -- union char with different length char -select * from ( +POSTHOOK: query: select * from ( select * from char_union1_ch1 union all select * from char_union1_ch2 limit 1 @@ -116,8 +112,7 @@ POSTHOOK: Input: default@char_union1_ch1 POSTHOOK: Input: default@char_union1_ch2 #### A masked pattern was here #### 1 abc -PREHOOK: query: -- union char with string -select * from ( +PREHOOK: query: select * from ( select * from char_union1_ch1 union all select * from char_union1_str limit 1 @@ -126,8 +121,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@char_union1_ch1 PREHOOK: Input: default@char_union1_str #### A masked pattern was here #### -POSTHOOK: query: -- union char with string -select * from ( +POSTHOOK: query: select * from ( select * from char_union1_ch1 union all select * from char_union1_str limit 1 diff --git a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out index e4dc75e..6c7d0ee 100644 --- a/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out +++ b/ql/src/test/results/clientpositive/columnStatsUpdateForStatsOptimizer_2.q.out @@ -150,14 +150,10 @@ POSTHOOK: query: ALTER TABLE calendar CHANGE year year1 INT POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@calendar POSTHOOK: Output: default@calendar -PREHOOK: query: --after patch, should be old stats rather than -1 - -desc formatted calendar +PREHOOK: query: desc formatted calendar PREHOOK: type: DESCTABLE PREHOOK: Input: default@calendar -POSTHOOK: query: --after patch, should be old stats rather than -1 - -desc formatted calendar +POSTHOOK: query: desc formatted calendar POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@calendar # col_name data_type comment @@ -189,13 +185,9 @@ Bucket Columns: [month] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: --but basic/column stats can 
not be used by optimizer - -explain select max(month) from calendar +PREHOOK: query: explain select max(month) from calendar PREHOOK: type: QUERY -POSTHOOK: query: --but basic/column stats can not be used by optimizer - -explain select max(month) from calendar +POSTHOOK: query: explain select max(month) from calendar POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -311,14 +303,10 @@ PREHOOK: Output: default@calendar POSTHOOK: query: truncate table calendar POSTHOOK: type: TRUNCATETABLE POSTHOOK: Output: default@calendar -PREHOOK: query: --after patch, should be 0 - -desc formatted calendar +PREHOOK: query: desc formatted calendar PREHOOK: type: DESCTABLE PREHOOK: Input: default@calendar -POSTHOOK: query: --after patch, should be 0 - -desc formatted calendar +POSTHOOK: query: desc formatted calendar POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@calendar # col_name data_type comment @@ -351,13 +339,9 @@ Bucket Columns: [month] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: --but column stats can not be used by optimizer - -explain select max(month) from calendar +PREHOOK: query: explain select max(month) from calendar PREHOOK: type: QUERY -POSTHOOK: query: --but column stats can not be used by optimizer - -explain select max(month) from calendar +POSTHOOK: query: explain select max(month) from calendar POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -412,13 +396,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@calendar #### A masked pattern was here #### NULL -PREHOOK: query: --basic stats can be used by optimizer - -explain select count(1) from calendar +PREHOOK: query: explain select count(1) from calendar PREHOOK: type: QUERY -POSTHOOK: query: --basic stats can be used by optimizer - -explain select count(1) from calendar +POSTHOOK: query: explain select count(1) from calendar POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git 
a/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out b/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out index 4a34cab..018e18f 100644 --- a/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out +++ b/ql/src/test/results/clientpositive/columnarserde_create_shortcut.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE columnarserde_create_shortcut(a array, b array, c map, d int, e string) STORED AS RCFILE +PREHOOK: query: CREATE TABLE columnarserde_create_shortcut(a array, b array, c map, d int, e string) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@columnarserde_create_shortcut -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE columnarserde_create_shortcut(a array, b array, c map, d int, e string) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE columnarserde_create_shortcut(a array, b array, c map, d int, e string) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@columnarserde_create_shortcut diff --git a/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out b/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out index 47fffab..08fb42e 100644 --- a/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out +++ b/ql/src/test/results/clientpositive/columnstats_partlvl_dp.q.out @@ -66,12 +66,10 @@ POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@employee_part POSTHOOK: Output: default@employee_part@employeesalary=3000.0/country=UK -PREHOOK: query: -- dynamic partitioning syntax -explain +PREHOOK: query: explain analyze table Employee_Part partition (employeeSalary='4000.0', country) compute statistics for columns employeeName, employeeID PREHOOK: type: QUERY -POSTHOOK: query: -- dynamic partitioning syntax -explain +POSTHOOK: query: explain analyze table Employee_Part partition (employeeSalary='4000.0', 
country) compute statistics for columns employeeName, employeeID POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -146,12 +144,10 @@ POSTHOOK: Input: default@employee_part # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment employeeName string 0 6 5.142857142857143 6 from deserializer -PREHOOK: query: -- don't specify all partitioning keys -explain +PREHOOK: query: explain analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID PREHOOK: type: QUERY -POSTHOOK: query: -- don't specify all partitioning keys -explain +POSTHOOK: query: explain analyze table Employee_Part partition (employeeSalary='2000.0') compute statistics for columns employeeID POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -237,12 +233,10 @@ POSTHOOK: Input: default@employee_part # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment employeeID int 16 31 0 9 from deserializer -PREHOOK: query: -- don't specify any partitioning keys -explain +PREHOOK: query: explain analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID PREHOOK: type: QUERY -POSTHOOK: query: -- don't specify any partitioning keys -explain +POSTHOOK: query: explain analyze table Employee_Part partition (employeeSalary) compute statistics for columns employeeID POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -415,11 +409,9 @@ POSTHOOK: Input: default@employee_part # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment employeeName string 0 6 5.142857142857143 6 from deserializer -PREHOOK: query: -- partially populated stats -drop table Employee +PREHOOK: query: drop table Employee PREHOOK: type: DROPTABLE -POSTHOOK: query: -- partially populated stats -drop table Employee +POSTHOOK: query: drop table Employee POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE Employee(employeeID int, 
employeeName String) partitioned by (employeeSalary double, country string) row format delimited fields terminated by '|' stored as textfile @@ -539,13 +531,11 @@ POSTHOOK: Input: default@employee # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment employeeName string 0 6 5.142857142857143 6 from deserializer -PREHOOK: query: -- add columns -alter table Employee add columns (c int ,d string) +PREHOOK: query: alter table Employee add columns (c int ,d string) PREHOOK: type: ALTERTABLE_ADDCOLS PREHOOK: Input: default@employee PREHOOK: Output: default@employee -POSTHOOK: query: -- add columns -alter table Employee add columns (c int ,d string) +POSTHOOK: query: alter table Employee add columns (c int ,d string) POSTHOOK: type: ALTERTABLE_ADDCOLS POSTHOOK: Input: default@employee POSTHOOK: Output: default@employee diff --git a/ql/src/test/results/clientpositive/combine1.q.out b/ql/src/test/results/clientpositive/combine1.q.out index 1c1224a..49b1473 100644 --- a/ql/src/test/results/clientpositive/combine1.q.out +++ b/ql/src/test/results/clientpositive/combine1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table combine1_1(key string, value string) stored as textfile +PREHOOK: query: create table combine1_1(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@combine1_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table combine1_1(key string, value string) stored as textfile +POSTHOOK: query: create table combine1_1(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@combine1_1 diff --git a/ql/src/test/results/clientpositive/combine2.q.out b/ql/src/test/results/clientpositive/combine2.q.out index 6616f66..6188345 100644 --- a/ql/src/test/results/clientpositive/combine2.q.out +++ 
b/ql/src/test/results/clientpositive/combine2.q.out @@ -4,33 +4,15 @@ PREHOOK: Input: database:default POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - --- SORT_QUERY_RESULTS - -create table combine2(key string) partitioned by (value string) +PREHOOK: query: create table combine2(key string) partitioned by (value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@combine2 -POSTHOOK: query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - --- SORT_QUERY_RESULTS - -create table combine2(key string) partitioned by (value string) +POSTHOOK: query: create table combine2(key string) partitioned by (value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@combine2 -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) --- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0 --- in an attempt to force the generation of multiple splits and multiple output files. --- However, Hadoop 0.20 is incapable of generating splits smaller than the block size --- when using CombineFileInputFormat, so only one split is generated. This has a --- significant impact on the results results of this test. --- This issue was fixed in MAPREDUCE-2046 which is included in 0.22. 
- -insert overwrite table combine2 partition(value) +PREHOOK: query: insert overwrite table combine2 partition(value) select * from ( select key, value from src where key < 10 union all @@ -40,15 +22,7 @@ select * from ( PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@combine2 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) --- This test sets mapred.max.split.size=256 and hive.merge.smallfiles.avgsize=0 --- in an attempt to force the generation of multiple splits and multiple output files. --- However, Hadoop 0.20 is incapable of generating splits smaller than the block size --- when using CombineFileInputFormat, so only one split is generated. This has a --- significant impact on the results results of this test. --- This issue was fixed in MAPREDUCE-2046 which is included in 0.22. - -insert overwrite table combine2 partition(value) +POSTHOOK: query: insert overwrite table combine2 partition(value) select * from ( select key, value from src where key < 10 union all diff --git a/ql/src/test/results/clientpositive/compustat_avro.q.out b/ql/src/test/results/clientpositive/compustat_avro.q.out index 6144b04..50c03bd 100644 --- a/ql/src/test/results/clientpositive/compustat_avro.q.out +++ b/ql/src/test/results/clientpositive/compustat_avro.q.out @@ -2,9 +2,7 @@ PREHOOK: query: drop table if exists testAvro PREHOOK: type: DROPTABLE POSTHOOK: query: drop table if exists testAvro POSTHOOK: type: DROPTABLE -#### A masked pattern was here #### - -create table testAvro +PREHOOK: query: create table testAvro ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS INPUTFORMAT @@ -15,9 +13,7 @@ create table testAvro PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@testAvro -#### A masked pattern was here #### - -create table testAvro +POSTHOOK: query: create table testAvro ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS INPUTFORMAT diff --git 
a/ql/src/test/results/clientpositive/compute_stats_binary.q.out b/ql/src/test/results/clientpositive/compute_stats_binary.q.out index db8606f..133c01e 100644 --- a/ql/src/test/results/clientpositive/compute_stats_binary.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_binary.q.out @@ -6,13 +6,11 @@ POSTHOOK: query: create table tab_binary(a binary) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_binary -PREHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/binary.txt" INTO TABLE tab_binary +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/binary.txt" INTO TABLE tab_binary PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tab_binary -POSTHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/binary.txt" INTO TABLE tab_binary +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/binary.txt" INTO TABLE tab_binary POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tab_binary @@ -25,13 +23,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_binary #### A masked pattern was here #### 10 -PREHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_binary +PREHOOK: query: select compute_stats(a, 16) from tab_binary PREHOOK: type: QUERY PREHOOK: Input: default@tab_binary #### A masked pattern was here #### -POSTHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_binary +POSTHOOK: query: select compute_stats(a, 16) from tab_binary POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_binary #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/compute_stats_boolean.q.out b/ql/src/test/results/clientpositive/compute_stats_boolean.q.out index 9f16691..be90ee2 100644 --- a/ql/src/test/results/clientpositive/compute_stats_boolean.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_boolean.q.out 
@@ -6,13 +6,11 @@ POSTHOOK: query: create table tab_bool(a boolean) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_bool -PREHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/bool.txt" INTO TABLE tab_bool +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/bool.txt" INTO TABLE tab_bool PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tab_bool -POSTHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/bool.txt" INTO TABLE tab_bool +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/bool.txt" INTO TABLE tab_bool POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tab_bool @@ -25,13 +23,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_bool #### A masked pattern was here #### 33 -PREHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_bool +PREHOOK: query: select compute_stats(a, 16) from tab_bool PREHOOK: type: QUERY PREHOOK: Input: default@tab_bool #### A masked pattern was here #### -POSTHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_bool +POSTHOOK: query: select compute_stats(a, 16) from tab_bool POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_bool #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/compute_stats_date.q.out b/ql/src/test/results/clientpositive/compute_stats_date.q.out index fba8cad..73f08ad 100644 --- a/ql/src/test/results/clientpositive/compute_stats_date.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_date.q.out @@ -18,13 +18,11 @@ POSTHOOK: query: create table tab_date ( POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_date -PREHOOK: query: -- insert some data -load data local inpath '../../data/files/flights_join.txt' overwrite into table tab_date +PREHOOK: query: load data local inpath 
'../../data/files/flights_join.txt' overwrite into table tab_date PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tab_date -POSTHOOK: query: -- insert some data -load data local inpath '../../data/files/flights_join.txt' overwrite into table tab_date +POSTHOOK: query: load data local inpath '../../data/files/flights_join.txt' overwrite into table tab_date POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tab_date @@ -37,13 +35,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_date #### A masked pattern was here #### 20 -PREHOOK: query: -- compute statistical summary of data -select compute_stats(fl_date, 16) from tab_date +PREHOOK: query: select compute_stats(fl_date, 16) from tab_date PREHOOK: type: QUERY PREHOOK: Input: default@tab_date #### A masked pattern was here #### -POSTHOOK: query: -- compute statistical summary of data -select compute_stats(fl_date, 16) from tab_date +POSTHOOK: query: select compute_stats(fl_date, 16) from tab_date POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_date #### A masked pattern was here #### @@ -116,11 +112,9 @@ POSTHOOK: Input: default@tab_date # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment fl_date date 2000-11-20 2010-10-29 0 18 from deserializer -PREHOOK: query: -- Update stats manually. Try both yyyy-mm-dd and integer value for high/low value -alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') +PREHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: query: -- Update stats manually. 
Try both yyyy-mm-dd and integer value for high/low value -alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') +POSTHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS PREHOOK: query: describe formatted tab_date fl_date PREHOOK: type: DESCTABLE diff --git a/ql/src/test/results/clientpositive/compute_stats_decimal.q.out b/ql/src/test/results/clientpositive/compute_stats_decimal.q.out index c1a2062..e0584c5 100644 --- a/ql/src/test/results/clientpositive/compute_stats_decimal.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_decimal.q.out @@ -6,13 +6,11 @@ POSTHOOK: query: create table tab_decimal(a decimal(35,3)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_decimal -PREHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/decimal.txt" INTO TABLE tab_decimal +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/decimal.txt" INTO TABLE tab_decimal PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tab_decimal -POSTHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/decimal.txt" INTO TABLE tab_decimal +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/decimal.txt" INTO TABLE tab_decimal POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tab_decimal @@ -25,13 +23,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_decimal #### A masked pattern was here #### 19 -PREHOOK: query: -- compute statistical summary of data -select compute_stats(a, 18) from tab_decimal +PREHOOK: query: select compute_stats(a, 18) from tab_decimal PREHOOK: type: QUERY PREHOOK: Input: default@tab_decimal #### A masked pattern was here #### -POSTHOOK: query: -- compute statistical summary of data -select compute_stats(a, 18) from 
tab_decimal +POSTHOOK: query: select compute_stats(a, 18) from tab_decimal POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_decimal #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/compute_stats_double.q.out b/ql/src/test/results/clientpositive/compute_stats_double.q.out index 0a67ecd..5b92173 100644 --- a/ql/src/test/results/clientpositive/compute_stats_double.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_double.q.out @@ -6,13 +6,11 @@ POSTHOOK: query: create table tab_double(a double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_double -PREHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/double.txt" INTO TABLE tab_double +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/double.txt" INTO TABLE tab_double PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tab_double -POSTHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/double.txt" INTO TABLE tab_double +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/double.txt" INTO TABLE tab_double POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tab_double @@ -25,13 +23,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_double #### A masked pattern was here #### 16 -PREHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_double +PREHOOK: query: select compute_stats(a, 16) from tab_double PREHOOK: type: QUERY PREHOOK: Input: default@tab_double #### A masked pattern was here #### -POSTHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_double +POSTHOOK: query: select compute_stats(a, 16) from tab_double POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_double #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/compute_stats_empty_table.q.out 
b/ql/src/test/results/clientpositive/compute_stats_empty_table.q.out index a6cb9af..05042c9 100644 --- a/ql/src/test/results/clientpositive/compute_stats_empty_table.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_empty_table.q.out @@ -15,13 +15,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_empty #### A masked pattern was here #### 0 -PREHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_empty +PREHOOK: query: select compute_stats(a, 16) from tab_empty PREHOOK: type: QUERY PREHOOK: Input: default@tab_empty #### A masked pattern was here #### -POSTHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_empty +POSTHOOK: query: select compute_stats(a, 16) from tab_empty POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_empty #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/compute_stats_long.q.out b/ql/src/test/results/clientpositive/compute_stats_long.q.out index b6f2b10..119d1731 100644 --- a/ql/src/test/results/clientpositive/compute_stats_long.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_long.q.out @@ -6,13 +6,11 @@ POSTHOOK: query: create table tab_int(a int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_int -PREHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tab_int -POSTHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tab_int @@ -25,13 +23,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_int #### A masked pattern was here 
#### 12 -PREHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_int +PREHOOK: query: select compute_stats(a, 16) from tab_int PREHOOK: type: QUERY PREHOOK: Input: default@tab_int #### A masked pattern was here #### -POSTHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_int +POSTHOOK: query: select compute_stats(a, 16) from tab_int POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_int #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/compute_stats_string.q.out b/ql/src/test/results/clientpositive/compute_stats_string.q.out index fbd0e6d..8c40490 100644 --- a/ql/src/test/results/clientpositive/compute_stats_string.q.out +++ b/ql/src/test/results/clientpositive/compute_stats_string.q.out @@ -6,13 +6,11 @@ POSTHOOK: query: create table tab_string(a string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab_string -PREHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/string.txt" INTO TABLE tab_string +PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/string.txt" INTO TABLE tab_string PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tab_string -POSTHOOK: query: -- insert some data -LOAD DATA LOCAL INPATH "../../data/files/string.txt" INTO TABLE tab_string +POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/string.txt" INTO TABLE tab_string POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tab_string @@ -25,13 +23,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_string #### A masked pattern was here #### 10 -PREHOOK: query: -- compute statistical summary of data -select compute_stats(a, 16) from tab_string +PREHOOK: query: select compute_stats(a, 16) from tab_string PREHOOK: type: QUERY PREHOOK: Input: default@tab_string #### A masked pattern was here #### -POSTHOOK: query: -- compute statistical summary of 
data -select compute_stats(a, 16) from tab_string +POSTHOOK: query: select compute_stats(a, 16) from tab_string POSTHOOK: type: QUERY POSTHOOK: Input: default@tab_string #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/concat_op.q.out b/ql/src/test/results/clientpositive/concat_op.q.out index dbe51c9..6afefae 100644 --- a/ql/src/test/results/clientpositive/concat_op.q.out +++ b/ql/src/test/results/clientpositive/concat_op.q.out @@ -112,24 +112,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 222 -PREHOOK: query: -- || has higher precedence than bitwise ops...so () is neccessary -select '1' || 4 / 2 || 1 + 2 * 1 || (6 & 4) || (1 | 4) +PREHOOK: query: select '1' || 4 / 2 || 1 + 2 * 1 || (6 & 4) || (1 | 4) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- || has higher precedence than bitwise ops...so () is neccessary -select '1' || 4 / 2 || 1 + 2 * 1 || (6 & 4) || (1 | 4) +POSTHOOK: query: select '1' || 4 / 2 || 1 + 2 * 1 || (6 & 4) || (1 | 4) POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 12.0345 -PREHOOK: query: -- however ^ is different from the other bitwise ops: -select 0 ^ 1 || '2' || 1 ^ 2 +PREHOOK: query: select 0 ^ 1 || '2' || 1 ^ 2 PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- however ^ is different from the other bitwise ops: -select 0 ^ 1 || '2' || 1 ^ 2 +POSTHOOK: query: select 0 ^ 1 || '2' || 1 ^ 2 POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### @@ -280,17 +276,11 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 87 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- check and/or precedence relation; should be true --- (true and false) or (false and true) or true => true 
psql/mysql/ora/hive --- true and (false or false) and (true or true) => false should not happen -select true and false or false and true or true +PREHOOK: query: select true and false or false and true or true PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- check and/or precedence relation; should be true --- (true and false) or (false and true) or true => true psql/mysql/ora/hive --- true and (false or false) and (true or true) => false should not happen -select true and false or false and true or true +POSTHOOK: query: select true and false or false and true or true POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out index 0ae046a..e68e3ca 100644 --- a/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out +++ b/ql/src/test/results/clientpositive/constantPropagateForSubQuery.q.out @@ -1,12 +1,8 @@ Warning: Shuffle Join JOIN[7][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain extended +PREHOOK: query: explain extended select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain extended +POSTHOOK: query: explain extended select * from (select a.key as ak, a.value as av, b.key as bk, b.value as bv from src a join src1 b where a.key = '429' ) c POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/constantfolding.q.out b/ql/src/test/results/clientpositive/constantfolding.q.out index e047827..10e185f 100644 --- a/ql/src/test/results/clientpositive/constantfolding.q.out +++ b/ql/src/test/results/clientpositive/constantfolding.q.out @@ 
-1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -select * from (select 'k2' as key, '1 ' as value from src limit 2)b +PREHOOK: query: select * from (select 'k2' as key, '1 ' as value from src limit 2)b union all select * from (select 'k3' as key, '' as value from src limit 2)b union all @@ -8,9 +6,7 @@ select * from (select 'k4' as key, ' ' as value from src limit 2)c PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select * from (select 'k2' as key, '1 ' as value from src limit 2)b +POSTHOOK: query: select * from (select 'k2' as key, '1 ' as value from src limit 2)b union all select * from (select 'k3' as key, '' as value from src limit 2)b union all @@ -172,11 +168,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### NULL -PREHOOK: query: -- numRows: 2 rawDataSize: 80 -explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src +PREHOOK: query: explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 80 -explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src +POSTHOOK: query: explain select cast("1970-12-31 15:59:58.174" as TIMESTAMP) from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -195,11 +189,9 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 20000 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- numRows: 2 rawDataSize: 112 -explain select cast("1970-12-31 15:59:58.174" as DATE) from src +PREHOOK: query: explain select cast("1970-12-31 15:59:58.174" as DATE) from src PREHOOK: type: QUERY -POSTHOOK: query: -- numRows: 2 rawDataSize: 112 -explain select cast("1970-12-31 15:59:58.174" as DATE) from src +POSTHOOK: query: explain select cast("1970-12-31 15:59:58.174" as DATE) from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git 
a/ql/src/test/results/clientpositive/convert_enum_to_string.q.out b/ql/src/test/results/clientpositive/convert_enum_to_string.q.out index a46857f..b9b8324 100644 --- a/ql/src/test/results/clientpositive/convert_enum_to_string.q.out +++ b/ql/src/test/results/clientpositive/convert_enum_to_string.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- Ensure Enum fields are converted to strings (instead of struct) - -create table convert_enum_to_string +PREHOOK: query: create table convert_enum_to_string partitioned by (b string) row format serde "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer" with serdeproperties ( @@ -9,9 +7,7 @@ create table convert_enum_to_string PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@convert_enum_to_string -POSTHOOK: query: -- Ensure Enum fields are converted to strings (instead of struct) - -create table convert_enum_to_string +POSTHOOK: query: create table convert_enum_to_string partitioned by (b string) row format serde "org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer" with serdeproperties ( diff --git a/ql/src/test/results/clientpositive/correlationoptimizer10.q.out b/ql/src/test/results/clientpositive/correlationoptimizer10.q.out index 81098ea..6745eb4 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer10.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer10.q.out @@ -1,31 +1,11 @@ -PREHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy. --- This case is used to test LEFT SEMI JOIN since Hive will --- introduce a GroupByOperator before the ReduceSinkOperator of --- the right table (yy in queries below) --- of LEFT SEMI JOIN. 
- --- SORT_AND_HASH_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx LEFT SEMI JOIN src yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy. --- This case is used to test LEFT SEMI JOIN since Hive will --- introduce a GroupByOperator before the ReduceSinkOperator of --- the right table (yy in queries below) --- of LEFT SEMI JOIN. - --- SORT_AND_HASH_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx @@ -371,14 +351,7 @@ POSTHOOK: Input: default@src1 66 1 98 1 XifREjIWNTdZii76gCxhIQ== -PREHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy. --- This case is used to test LEFT SEMI JOIN since Hive will --- introduce a GroupByOperator before the ReduceSinkOperator of --- the right table (yy in queries below) --- of LEFT SEMI JOIN. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value FROM src1 xx @@ -389,14 +362,7 @@ LEFT SEMI JOIN y.key > 20) yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy. --- This case is used to test LEFT SEMI JOIN since Hive will --- introduce a GroupByOperator before the ReduceSinkOperator of --- the right table (yy in queries below) --- of LEFT SEMI JOIN. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.value FROM src1 xx @@ -707,11 +673,7 @@ POSTHOOK: Input: default@src1 66 val_66 98 val_98 yXzkFzMwxxoH+6e+nKoA8A== -PREHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- This test is used to test if we can use shared scan for --- xx, yy:x, and yy:y. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value FROM src xx @@ -721,11 +683,7 @@ LEFT SEMI JOIN WHERE x.key < 200 AND x.key > 180) yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- This test is used to test if we can use shared scan for --- xx, yy:x, and yy:y. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.value FROM src xx diff --git a/ql/src/test/results/clientpositive/correlationoptimizer11.q.out b/ql/src/test/results/clientpositive/correlationoptimizer11.q.out index 70aaa28..00006a6 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer11.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer11.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Tests in this file are used to make sure Correlation Optimizer --- can correctly handle tables with partitions - -CREATE TABLE part_table(key string, value string) PARTITIONED BY (partitionId int) +PREHOOK: query: CREATE TABLE part_table(key string, value string) PARTITIONED BY (partitionId int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_table -POSTHOOK: query: -- Tests in this file are used to make sure Correlation Optimizer --- can correctly handle tables with partitions - -CREATE TABLE part_table(key string, value string) PARTITIONED BY (partitionId int) +POSTHOOK: query: CREATE TABLE part_table(key string, value string) PARTITIONED BY (partitionId int) POSTHOOK: type: CREATETABLE POSTHOOK: 
Output: database:default POSTHOOK: Output: default@part_table @@ -36,22 +30,14 @@ POSTHOOK: Input: default@src1 POSTHOOK: Output: default@part_table@partitionid=2 POSTHOOK: Lineage: part_table PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: part_table PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- In this case, we should not do shared scan on part_table --- because left and right tables of JOIN use different partitions --- of part_table. With Correlation Optimizer we will generate --- 1 MR job. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.key AS key, count(1) AS cnt FROM part_table x JOIN part_table y ON (x.key = y.key) WHERE x.partitionId = 1 AND y.partitionId = 2 GROUP BY x.key PREHOOK: type: QUERY -POSTHOOK: query: -- In this case, we should not do shared scan on part_table --- because left and right tables of JOIN use different partitions --- of part_table. With Correlation Optimizer we will generate --- 1 MR job. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.key AS key, count(1) AS cnt FROM part_table x JOIN part_table y ON (x.key = y.key) WHERE x.partitionId = 1 AND @@ -281,22 +267,14 @@ POSTHOOK: Input: default@part_table@partitionid=2 128 3 146 2 150 1 -PREHOOK: query: -- In this case, we should do shared scan on part_table --- because left and right tables of JOIN use the same partition --- of part_table. With Correlation Optimizer we will generate --- 1 MR job. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.key AS key, count(1) AS cnt FROM part_table x JOIN part_table y ON (x.key = y.key) WHERE x.partitionId = 2 AND y.partitionId = 2 GROUP BY x.key PREHOOK: type: QUERY -POSTHOOK: query: -- In this case, we should do shared scan on part_table --- because left and right tables of JOIN use the same partition --- of part_table. With Correlation Optimizer we will generate --- 1 MR job. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.key AS key, count(1) AS cnt FROM part_table x JOIN part_table y ON (x.key = y.key) WHERE x.partitionId = 2 AND diff --git a/ql/src/test/results/clientpositive/correlationoptimizer12.q.out b/ql/src/test/results/clientpositive/correlationoptimizer12.q.out index 3d936e5..23443ee 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer12.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer12.q.out @@ -1,13 +1,11 @@ -PREHOOK: query: -- Currently, correlation optimizer does not support PTF operator -EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.cnt +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.cnt FROM (SELECT x.key as key, count(x.value) OVER (PARTITION BY x.key) AS cnt FROM src x) xx JOIN (SELECT y.key as key, count(y.value) OVER (PARTITION BY y.key) AS cnt FROM src1 y) yy ON (xx.key=yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Currently, correlation optimizer does not support PTF operator -EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.cnt +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.cnt FROM (SELECT x.key as key, count(x.value) OVER (PARTITION BY x.key) AS cnt FROM src x) xx JOIN diff --git a/ql/src/test/results/clientpositive/correlationoptimizer13.q.out b/ql/src/test/results/clientpositive/correlationoptimizer13.q.out index c3690a5..0bd27b4 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer13.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer13.q.out @@ -20,11 +20,7 @@ POSTHOOK: Lineage: tmp.c1 EXPRESSION [(src)x.FieldSchema(name:key, type:string, POSTHOOK: Lineage: tmp.c2 EXPRESSION [(src)y.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tmp.c3 SIMPLE [(src)x.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: tmp.c4 SIMPLE [(src)y.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The query in this file have operators with same set of keys --- but having 
different sorting orders. --- Correlation optimizer currently do not optimize this case. - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c3, x.c1) xx @@ -32,11 +28,7 @@ JOIN (SELECT x1.c1 AS key1, x1.c3 AS key2, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c3, x1.c1) yy ON (xx.key1 = yy.key1 AND xx.key2 == yy.key2) ORDER BY xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt PREHOOK: type: QUERY -POSTHOOK: query: -- The query in this file have operators with same set of keys --- but having different sorting orders. --- Correlation optimizer currently do not optimize this case. - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key1, xx.key2, yy.key1, yy.key2, xx.cnt, yy.cnt FROM (SELECT x.c1 AS key1, x.c3 AS key2, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c3, x.c1) xx diff --git a/ql/src/test/results/clientpositive/correlationoptimizer14.q.out b/ql/src/test/results/clientpositive/correlationoptimizer14.q.out index 010d9ce..149f33f 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer14.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer14.q.out @@ -1,13 +1,4 @@ -PREHOOK: query: -- This file is used to show plans of queries involving cluster by, distribute by, --- order by, and sort by. --- Right now, Correlation optimizer check the most restrictive condition --- when determining if a ReduceSinkOperator is not necessary. --- This condition is that two ReduceSinkOperators should have same sorting columns, --- same partitioning columns, same sorting orders and no conflict on the numbers of reducers. - --- Distribute by will not be optimized because distribute by does not introduce --- sorting columns. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x DISTRIBUTE BY key) xx @@ -15,16 +6,7 @@ JOIN (SELECT y.key as key, y.value as value FROM src1 y DISTRIBUTE BY key) yy ON (xx.key=yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- This file is used to show plans of queries involving cluster by, distribute by, --- order by, and sort by. --- Right now, Correlation optimizer check the most restrictive condition --- when determining if a ReduceSinkOperator is not necessary. --- This condition is that two ReduceSinkOperators should have same sorting columns, --- same partitioning columns, same sorting orders and no conflict on the numbers of reducers. - --- Distribute by will not be optimized because distribute by does not introduce --- sorting columns. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x DISTRIBUTE BY key) xx @@ -139,8 +121,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Sort by will not be optimized because sort by does not introduce partitioning columns -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x SORT BY key) xx @@ -148,8 +129,7 @@ JOIN (SELECT y.key as key, y.value as value FROM src1 y SORT BY key) yy ON (xx.key=yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Sort by will not be optimized because sort by does not introduce partitioning columns -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x SORT BY key) xx @@ -264,8 +244,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Distribute by and sort by on the same key(s) should be optimized -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x DISTRIBUTE BY key SORT BY key) xx 
@@ -273,8 +252,7 @@ JOIN (SELECT y.key as key, y.value as value FROM src1 y DISTRIBUTE BY key SORT BY key) yy ON (xx.key=yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Distribute by and sort by on the same key(s) should be optimized -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x DISTRIBUTE BY key SORT BY key) xx @@ -613,9 +591,7 @@ POSTHOOK: Input: default@src1 66 val_66 66 val_66 98 val_98 98 val_98 98 val_98 98 val_98 -PREHOOK: query: -- Because for join we use ascending order, if sort by uses descending order, --- this query will not be optimized -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x DISTRIBUTE BY key SORT BY key DESC) xx @@ -623,9 +599,7 @@ JOIN (SELECT y.key as key, y.value as value FROM src1 y DISTRIBUTE BY key SORT BY key DESC) yy ON (xx.key=yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Because for join we use ascending order, if sort by uses descending order, --- this query will not be optimized -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x DISTRIBUTE BY key SORT BY key DESC) xx @@ -742,9 +716,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Even if hive.optimize.reducededuplication.min.reducer=1, order by will not be optimized --- because order by does not introduce partitioning columns -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x ORDER BY key) xx @@ -752,9 +724,7 @@ JOIN (SELECT y.key as key, y.value as value FROM src1 y ORDER BY key) yy ON (xx.key=yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Even if hive.optimize.reducededuplication.min.reducer=1, order by will not be optimized --- because order by does not introduce partitioning columns -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT 
xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x ORDER BY key) xx @@ -869,8 +839,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Cluster by will be optimized -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x Cluster BY key) xx @@ -878,8 +847,7 @@ JOIN (SELECT y.key as key, y.value as value FROM src1 y Cluster BY key) yy ON (xx.key=yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Cluster by will be optimized -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x Cluster BY key) xx @@ -1218,9 +1186,7 @@ POSTHOOK: Input: default@src1 66 val_66 66 val_66 98 val_98 98 val_98 98 val_98 98 val_98 -PREHOOK: query: -- If hive.optimize.reducededuplication.min.reducer=1, --- group by and then order by should be optimized -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x CLUSTER BY key) xx @@ -1228,9 +1194,7 @@ JOIN (SELECT y.key as key, count(*) as value FROM src1 y GROUP BY y.key ORDER BY key) yy ON (xx.key=yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- If hive.optimize.reducededuplication.min.reducer=1, --- group by and then order by should be optimized -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.value, yy.key, yy.value FROM (SELECT x.key as key, x.value as value FROM src x CLUSTER BY key) xx diff --git a/ql/src/test/results/clientpositive/correlationoptimizer15.q.out b/ql/src/test/results/clientpositive/correlationoptimizer15.q.out index f5a4c35..a142867 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer15.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer15.q.out @@ -1,21 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- When Correlation Optimizer is turned off, 4 MR jobs are needed. 
--- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx JOIN src yy ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx diff --git a/ql/src/test/results/clientpositive/correlationoptimizer5.q.out b/ql/src/test/results/clientpositive/correlationoptimizer5.q.out index f8a53a0..00bdb4c 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer5.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer5.q.out @@ -86,9 +86,7 @@ POSTHOOK: query: CREATE TABLE dest_co3(key INT, val STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_co3 -PREHOOK: query: -- When Correlation Optimizer is turned off, 3 MR jobs are needed. --- When Correlation Optimizer is turned on, only a single MR job is needed. -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_co1 SELECT b.key, d.val FROM @@ -97,9 +95,7 @@ JOIN (SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d ON b.key = d.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 3 MR jobs are needed. --- When Correlation Optimizer is turned on, only a single MR job is needed. 
-EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_co1 SELECT b.key, d.val FROM @@ -495,8 +491,7 @@ POSTHOOK: Input: default@t4 POSTHOOK: Output: default@dest_co2 POSTHOOK: Lineage: dest_co2.key SIMPLE [(t1)x.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: dest_co2.val SIMPLE [(t4)n.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- Enable hive.auto.convert.join. -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_co3 SELECT b.key, d.val FROM @@ -505,8 +500,7 @@ JOIN (SELECT m.key, n.val FROM T3 m JOIN T4 n ON (m.key = n.key)) d ON b.key = d.key PREHOOK: type: QUERY -POSTHOOK: query: -- Enable hive.auto.convert.join. -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_co3 SELECT b.key, d.val FROM @@ -804,17 +798,11 @@ POSTHOOK: Input: default@t4 POSTHOOK: Output: default@dest_co3 POSTHOOK: Lineage: dest_co3.key SIMPLE [(t1)x.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: dest_co3.val SIMPLE [(t4)n.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- dest_co1, dest_co2 and dest_co3 should be same --- SELECT * FROM dest_co1 x ORDER BY x.key, x.val; --- SELECT * FROM dest_co2 x ORDER BY x.key, x.val; -SELECT SUM(HASH(key)), SUM(HASH(val)) FROM dest_co1 +PREHOOK: query: SELECT SUM(HASH(key)), SUM(HASH(val)) FROM dest_co1 PREHOOK: type: QUERY PREHOOK: Input: default@dest_co1 #### A masked pattern was here #### -POSTHOOK: query: -- dest_co1, dest_co2 and dest_co3 should be same --- SELECT * FROM dest_co1 x ORDER BY x.key, x.val; --- SELECT * FROM dest_co2 x ORDER BY x.key, x.val; -SELECT SUM(HASH(key)), SUM(HASH(val)) FROM dest_co1 +POSTHOOK: query: SELECT SUM(HASH(key)), SUM(HASH(val)) FROM dest_co1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_co1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/correlationoptimizer7.q.out b/ql/src/test/results/clientpositive/correlationoptimizer7.q.out index f2c048e..efcb46b 100644 
--- a/ql/src/test/results/clientpositive/correlationoptimizer7.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer7.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.value FROM (SELECT x.key AS key, count(1) AS cnt FROM src x JOIN src1 y ON (x.key = y.key) @@ -8,9 +6,7 @@ FROM (SELECT x.key AS key, count(1) AS cnt JOIN src1 yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.value FROM (SELECT x.key AS key, count(1) AS cnt FROM src x JOIN src1 y ON (x.key = y.key) @@ -365,13 +361,7 @@ POSTHOOK: Input: default@src1 406 4 406 val_406 66 1 66 val_66 98 2 98 val_98 -PREHOOK: query: -- Without correlation optimizer, we will have 3 MR jobs. --- The first one is a MapJoin and Aggregation (in the Reduce Phase). --- The second one is another MapJoin. The third one is for ordering. --- With the correlation optimizer, right now, we have --- 1 MR jobs, evaluatinf the sub-query xx and the join of --- xx and yy. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.value FROM (SELECT x.key AS key, count(1) AS cnt FROM src x JOIN src1 y ON (x.key = y.key) @@ -379,13 +369,7 @@ FROM (SELECT x.key AS key, count(1) AS cnt JOIN src1 yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- Without correlation optimizer, we will have 3 MR jobs. --- The first one is a MapJoin and Aggregation (in the Reduce Phase). --- The second one is another MapJoin. The third one is for ordering. --- With the correlation optimizer, right now, we have --- 1 MR jobs, evaluatinf the sub-query xx and the join of --- xx and yy. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.value FROM (SELECT x.key AS key, count(1) AS cnt FROM src x JOIN src1 y ON (x.key = y.key) diff --git a/ql/src/test/results/clientpositive/correlationoptimizer8.q.out b/ql/src/test/results/clientpositive/correlationoptimizer8.q.out index 4667149..785fa98 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer8.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer8.q.out @@ -1,12 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- When the Correlation Optimizer is turned off, this query will be evaluated by --- 4 MR jobs. --- When the Correlation Optimizer is turned on, because both inputs of the --- UnionOperator are correlated, we can use 2 MR jobs to evaluate this query. --- The first MR job will evaluate subquery subq1 and subq1 join x. The second --- MR is for ordering. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.key, x.value, subq1.cnt FROM ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key @@ -15,15 +7,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- When the Correlation Optimizer is turned off, this query will be evaluated by --- 4 MR jobs. --- When the Correlation Optimizer is turned on, because both inputs of the --- UnionOperator are correlated, we can use 2 MR jobs to evaluate this query. --- The first MR job will evaluate subquery subq1 and subq1 join x. The second --- MR is for ordering. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.key, x.value, subq1.cnt FROM ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key @@ -426,13 +410,7 @@ POSTHOOK: Input: default@src1 369 3 401 val_401 5 406 val_406 4 -PREHOOK: query: -- When the Correlation Optimizer is turned off, this query will be evaluated by --- 4 MR jobs. 
--- When the Correlation Optimizer is turned on, because both inputs of the --- UnionOperator are correlated, we can use 2 MR jobs to evaluate this query. --- The first MR job will evaluate subquery subq1 and subq1 join x. The second --- MR is for ordering. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT subq1.key, subq1.cnt, x.key, x.value FROM ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key @@ -441,13 +419,7 @@ FROM ) subq1 LEFT OUTER JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- When the Correlation Optimizer is turned off, this query will be evaluated by --- 4 MR jobs. --- When the Correlation Optimizer is turned on, because both inputs of the --- UnionOperator are correlated, we can use 2 MR jobs to evaluate this query. --- The first MR job will evaluate subquery subq1 and subq1 join x. The second --- MR is for ordering. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT subq1.key, subq1.cnt, x.key, x.value FROM ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key @@ -868,10 +840,7 @@ val_278 1 NULL NULL val_311 1 NULL NULL val_401 1 NULL NULL val_406 1 NULL NULL -PREHOOK: query: -- When the Correlation Optimizer is turned on, because a input of UnionOperator is --- not correlated, we cannot handle this case right now. So, this query will not be --- optimized. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.key, x.value, subq1.cnt FROM ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key @@ -880,10 +849,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- When the Correlation Optimizer is turned on, because a input of UnionOperator is --- not correlated, we cannot handle this case right now. So, this query will not be --- optimized. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.key, x.value, subq1.cnt FROM ( SELECT x.key as key, count(1) as cnt from src x where x.key < 20 group by x.key @@ -1046,10 +1012,7 @@ STAGE PLANS: ListSink WARNING: Comparing a bigint and a string may result in a loss of precision. -PREHOOK: query: -- When the Correlation Optimizer is turned on, because a input of UnionOperator is --- not correlated, we cannot handle this case right now. So, this query will not be --- optimized. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT subq1.key, subq1.value, x.key, x.value FROM ( SELECT cast(x.key as INT) as key, count(1) as value from src x where x.key < 20 group by x.key @@ -1058,10 +1021,7 @@ FROM ) subq1 FULL OUTER JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- When the Correlation Optimizer is turned on, because a input of UnionOperator is --- not correlated, we cannot handle this case right now. So, this query will not be --- optimized. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT subq1.key, subq1.value, x.key, x.value FROM ( SELECT cast(x.key as INT) as key, count(1) as value from src x where x.key < 20 group by x.key diff --git a/ql/src/test/results/clientpositive/correlationoptimizer9.q.out b/ql/src/test/results/clientpositive/correlationoptimizer9.q.out index 4b4bed2..e3f11ef 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer9.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING) +PREHOOK: query: CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING) +POSTHOOK: query: CREATE TABLE tmp(c1 INT, c2 INT, c3 STRING, c4 STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp @@ 
-196,8 +192,7 @@ POSTHOOK: Input: default@tmp 116 116 1 1 118 118 4 4 119 119 9 9 -PREHOOK: query: -- The merged table scan should be able to load both c1 and c2 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx @@ -205,8 +200,7 @@ JOIN (SELECT x1.c2 AS key, count(1) AS cnt FROM tmp x1 WHERE x1.c2 > 100 GROUP BY x1.c2) yy ON (xx.key = yy.key) PREHOOK: type: QUERY -POSTHOOK: query: -- The merged table scan should be able to load both c1 and c2 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, yy.key, xx.cnt, yy.cnt FROM (SELECT x.c1 AS key, count(1) AS cnt FROM tmp x WHERE x.c1 < 120 GROUP BY x.c1) xx diff --git a/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out b/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out index 216d3be..6af696a 100644 --- a/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out +++ b/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out @@ -1,17 +1,9 @@ -PREHOOK: query: -- Test stored as directories --- it covers a few cases - --- 1. create a table with stored as directories -CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) +PREHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: -- Test stored as directories --- it covers a few cases - --- 1. 
create a table with stored as directories -CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) +POSTHOOK: query: CREATE TABLE if not exists stored_as_dirs_multiple (col1 STRING, col2 int, col3 STRING) SKEWED BY (col1, col2) ON (('s1',1), ('s3',3), ('s13',13), ('s78',78)) stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -55,13 +47,11 @@ Skewed Columns: [col1, col2] Skewed Values: [[s1, 1], [s13, 13], [s3, 3], [s78, 78]] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- 2. turn off stored as directories but table is still a skewed table -alter table stored_as_dirs_multiple not stored as DIRECTORIES +PREHOOK: query: alter table stored_as_dirs_multiple not stored as DIRECTORIES PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@stored_as_dirs_multiple PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: -- 2. turn off stored as directories but table is still a skewed table -alter table stored_as_dirs_multiple not stored as DIRECTORIES +POSTHOOK: query: alter table stored_as_dirs_multiple not stored as DIRECTORIES POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@stored_as_dirs_multiple POSTHOOK: Output: default@stored_as_dirs_multiple @@ -103,13 +93,11 @@ Skewed Columns: [col1, col2] Skewed Values: [[s1, 1], [s13, 13], [s3, 3], [s78, 78]] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- 3. turn off skewed -alter table stored_as_dirs_multiple not skewed +PREHOOK: query: alter table stored_as_dirs_multiple not skewed PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@stored_as_dirs_multiple PREHOOK: Output: default@stored_as_dirs_multiple -POSTHOOK: query: -- 3. 
turn off skewed -alter table stored_as_dirs_multiple not skewed +POSTHOOK: query: alter table stored_as_dirs_multiple not skewed POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@stored_as_dirs_multiple POSTHOOK: Output: default@stored_as_dirs_multiple @@ -149,13 +137,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- 4. alter a table to stored as directories -CREATE TABLE stored_as_dirs_single (key STRING, value STRING) +PREHOOK: query: CREATE TABLE stored_as_dirs_single (key STRING, value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: -- 4. alter a table to stored as directories -CREATE TABLE stored_as_dirs_single (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE stored_as_dirs_single (key STRING, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@stored_as_dirs_single @@ -207,13 +193,11 @@ Skewed Columns: [key] Skewed Values: [[1], [5], [6]] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- 5. turn off skewed should turn off stored as directories too -alter table stored_as_dirs_single not skewed +PREHOOK: query: alter table stored_as_dirs_single not skewed PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@stored_as_dirs_single PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: -- 5. turn off skewed should turn off stored as directories too -alter table stored_as_dirs_single not skewed +POSTHOOK: query: alter table stored_as_dirs_single not skewed POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@stored_as_dirs_single POSTHOOK: Output: default@stored_as_dirs_single @@ -252,14 +236,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- 6. 
turn on stored as directories again -alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') +PREHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') stored as DIRECTORIES PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@stored_as_dirs_single PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: -- 6. turn on stored as directories again -alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') +POSTHOOK: query: alter table stored_as_dirs_single SKEWED BY (key) ON ('1','5','6') stored as DIRECTORIES POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@stored_as_dirs_single @@ -302,13 +284,11 @@ Skewed Columns: [key] Skewed Values: [[1], [5], [6]] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- 7. create table like -create table stored_as_dirs_single_like like stored_as_dirs_single +PREHOOK: query: create table stored_as_dirs_single_like like stored_as_dirs_single PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@stored_as_dirs_single_like -POSTHOOK: query: -- 7. 
create table like -create table stored_as_dirs_single_like like stored_as_dirs_single +POSTHOOK: query: create table stored_as_dirs_single_like like stored_as_dirs_single POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@stored_as_dirs_single_like @@ -350,13 +330,11 @@ Skewed Columns: [key] Skewed Values: [[1], [5], [6]] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- cleanup -drop table stored_as_dirs_single +PREHOOK: query: drop table stored_as_dirs_single PREHOOK: type: DROPTABLE PREHOOK: Input: default@stored_as_dirs_single PREHOOK: Output: default@stored_as_dirs_single -POSTHOOK: query: -- cleanup -drop table stored_as_dirs_single +POSTHOOK: query: drop table stored_as_dirs_single POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@stored_as_dirs_single POSTHOOK: Output: default@stored_as_dirs_single diff --git a/ql/src/test/results/clientpositive/create_big_view.q.out b/ql/src/test/results/clientpositive/create_big_view.q.out index 020ac90..08f4c87 100644 --- a/ql/src/test/results/clientpositive/create_big_view.q.out +++ b/ql/src/test/results/clientpositive/create_big_view.q.out @@ -2,9 +2,7 @@ PREHOOK: query: DROP VIEW big_view PREHOOK: type: DROPVIEW POSTHOOK: query: DROP VIEW big_view POSTHOOK: type: DROPVIEW -PREHOOK: query: -- Define a view with long SQL text to test metastore and other limits. - -CREATE VIEW big_view AS SELECT +PREHOOK: query: CREATE VIEW big_view AS SELECT 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' AS a, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', @@ -245,9 +243,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@big_view -POSTHOOK: query: -- Define a view with long SQL text to test metastore and other limits. 
- -CREATE VIEW big_view AS SELECT +POSTHOOK: query: CREATE VIEW big_view AS SELECT 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' AS a, 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', diff --git a/ql/src/test/results/clientpositive/create_func1.q.out b/ql/src/test/results/clientpositive/create_func1.q.out index c5e035a..d4afc83 100644 --- a/ql/src/test/results/clientpositive/create_func1.q.out +++ b/ql/src/test/results/clientpositive/create_func1.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- qtest_get_java_boolean should already be created during test initialization -select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1 +PREHOOK: query: select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- qtest_get_java_boolean should already be created during test initialization -select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1 +POSTHOOK: query: select qtest_get_java_boolean('true'), qtest_get_java_boolean('false') from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -64,19 +62,15 @@ POSTHOOK: query: drop function mydb.func1 POSTHOOK: type: DROPFUNCTION POSTHOOK: Output: database:mydb POSTHOOK: Output: mydb.func1 -PREHOOK: query: -- function should now be gone -show functions mydb.func1 +PREHOOK: query: show functions mydb.func1 PREHOOK: type: SHOWFUNCTIONS -POSTHOOK: query: -- function should now be gone -show functions mydb.func1 +POSTHOOK: query: show functions mydb.func1 POSTHOOK: type: SHOWFUNCTIONS -PREHOOK: query: -- To test function name resolution -create function mydb.qtest_get_java_boolean as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper' +PREHOOK: query: create function 
mydb.qtest_get_java_boolean as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: database:mydb PREHOOK: Output: mydb.qtest_get_java_boolean -POSTHOOK: query: -- To test function name resolution -create function mydb.qtest_get_java_boolean as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper' +POSTHOOK: query: create function mydb.qtest_get_java_boolean as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFUpper' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: database:mydb POSTHOOK: Output: mydb.qtest_get_java_boolean @@ -86,13 +80,11 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- unqualified function should resolve to one in default db -select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1 +PREHOOK: query: select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- unqualified function should resolve to one in default db -select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1 +POSTHOOK: query: select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -103,13 +95,11 @@ PREHOOK: Input: database:mydb POSTHOOK: query: use mydb POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:mydb -PREHOOK: query: -- unqualified function should resolve to one in mydb db -select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1 
+PREHOOK: query: select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- unqualified function should resolve to one in mydb db -select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1 +POSTHOOK: query: select qtest_get_java_boolean('abc'), default.qtest_get_java_boolean('abc'), mydb.qtest_get_java_boolean('abc') from default.src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/create_like2.q.out b/ql/src/test/results/clientpositive/create_like2.q.out index 9f14a33..38bd499 100644 --- a/ql/src/test/results/clientpositive/create_like2.q.out +++ b/ql/src/test/results/clientpositive/create_like2.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Tests the copying over of Table Parameters according to a HiveConf setting --- when doing a CREATE TABLE LIKE. - -CREATE TABLE table1(a INT, b STRING) +PREHOOK: query: CREATE TABLE table1(a INT, b STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table1 -POSTHOOK: query: -- Tests the copying over of Table Parameters according to a HiveConf setting --- when doing a CREATE TABLE LIKE. 
- -CREATE TABLE table1(a INT, b STRING) +POSTHOOK: query: CREATE TABLE table1(a INT, b STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@table1 diff --git a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out index 253106c..bef54a8 100644 --- a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out +++ b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Test that CREATE TABLE LIKE commands can take explicit table properties - -CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value') +PREHOOK: query: CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- Test that CREATE TABLE LIKE commands can take explicit table properties - -CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value') +POSTHOOK: query: CREATE TABLE test_table LIKE src TBLPROPERTIES('key'='value') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table @@ -46,15 +42,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: --Test that CREATE TABLE LIKE commands can take default table properties - -CREATE TABLE test_table1 LIKE src +PREHOOK: query: CREATE TABLE test_table1 LIKE src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: --Test that CREATE TABLE LIKE commands can take default table properties - -CREATE TABLE test_table1 LIKE src +POSTHOOK: query: CREATE TABLE test_table1 LIKE src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -94,15 +86,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test that CREATE TABLE LIKE 
commands can take default and explicit table properties - -CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2') +PREHOOK: query: CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table2 -POSTHOOK: query: -- Test that CREATE TABLE LIKE commands can take default and explicit table properties - -CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2') +POSTHOOK: query: CREATE TABLE test_table2 LIKE src TBLPROPERTIES('key2' = 'value2') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 @@ -143,15 +131,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test that properties inherited are overwritten by explicitly set ones - -CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3') +PREHOOK: query: CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table3 -POSTHOOK: query: -- Test that properties inherited are overwritten by explicitly set ones - -CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3') +POSTHOOK: query: CREATE TABLE test_table3 LIKE test_table2 TBLPROPERTIES('key2' = 'value3') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 @@ -192,16 +176,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: --Test that CREATE TALBE LIKE on a view can take explicit table properties - -CREATE VIEW test_view (key, value) AS SELECT * FROM src +PREHOOK: query: CREATE VIEW test_view (key, value) AS SELECT * FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@test_view -POSTHOOK: query: --Test that CREATE TALBE LIKE on a view 
can take explicit table properties - -CREATE VIEW test_view (key, value) AS SELECT * FROM src +POSTHOOK: query: CREATE VIEW test_view (key, value) AS SELECT * FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/create_like_view.q.out b/ql/src/test/results/clientpositive/create_like_view.q.out index d5dba40..c1b5c7e 100644 --- a/ql/src/test/results/clientpositive/create_like_view.q.out +++ b/ql/src/test/results/clientpositive/create_like_view.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE IF EXISTS table1 +PREHOOK: query: DROP TABLE IF EXISTS table1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE IF EXISTS table1 +POSTHOOK: query: DROP TABLE IF EXISTS table1 POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE IF EXISTS table2 PREHOOK: type: DROPTABLE @@ -253,14 +249,12 @@ POSTHOOK: query: DROP VIEW view1 POSTHOOK: type: DROPVIEW POSTHOOK: Input: default@view1 POSTHOOK: Output: default@view1 -PREHOOK: query: -- check partitions -create view view1 partitioned on (ds, hr) as select * from srcpart +PREHOOK: query: create view view1 partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:default PREHOOK: Output: default@view1 -POSTHOOK: query: -- check partitions -create view view1 partitioned on (ds, hr) as select * from srcpart +POSTHOOK: query: create view view1 partitioned on (ds, hr) as select * from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/create_or_replace_view.q.out b/ql/src/test/results/clientpositive/create_or_replace_view.q.out index 834cdf0..2ad6b47 100644 --- a/ql/src/test/results/clientpositive/create_or_replace_view.q.out +++ b/ql/src/test/results/clientpositive/create_or_replace_view.q.out @@ -48,14 +48,12 @@ Sort 
Columns: [] View Original Text: select * from srcpart View Expanded Text: select `srcpart`.`key`, `srcpart`.`value`, `srcpart`.`ds`, `srcpart`.`hr` from `default`.`srcpart` View Rewrite Enabled: No -PREHOOK: query: -- modifying definition of unpartitioned view -create or replace view vt.v partitioned on (ds, hr) as select * from srcpart +PREHOOK: query: create or replace view vt.v partitioned on (ds, hr) as select * from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:vt PREHOOK: Output: vt@v -POSTHOOK: query: -- modifying definition of unpartitioned view -create or replace view vt.v partitioned on (ds, hr) as select * from srcpart +POSTHOOK: query: create or replace view vt.v partitioned on (ds, hr) as select * from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: database:vt @@ -169,14 +167,12 @@ PREHOOK: Input: vt@v POSTHOOK: query: show partitions vt.v POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: vt@v -PREHOOK: query: -- altering partitioned view 1 -create or replace view vt.v partitioned on (ds, hr) as select value, ds, hr from srcpart +PREHOOK: query: create or replace view vt.v partitioned on (ds, hr) as select value, ds, hr from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:vt PREHOOK: Output: vt@v -POSTHOOK: query: -- altering partitioned view 1 -create or replace view vt.v partitioned on (ds, hr) as select value, ds, hr from srcpart +POSTHOOK: query: create or replace view vt.v partitioned on (ds, hr) as select value, ds, hr from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: database:vt @@ -239,14 +235,12 @@ PREHOOK: Input: vt@v POSTHOOK: query: show partitions vt.v POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: vt@v -PREHOOK: query: -- altering partitioned view 2 -create or replace view vt.v partitioned on (ds, hr) as select key, value, ds, hr from srcpart +PREHOOK: query: create or 
replace view vt.v partitioned on (ds, hr) as select key, value, ds, hr from srcpart PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:vt PREHOOK: Output: vt@v -POSTHOOK: query: -- altering partitioned view 2 -create or replace view vt.v partitioned on (ds, hr) as select key, value, ds, hr from srcpart +POSTHOOK: query: create or replace view vt.v partitioned on (ds, hr) as select key, value, ds, hr from srcpart POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: database:vt @@ -318,13 +312,11 @@ POSTHOOK: query: drop view vt.v POSTHOOK: type: DROPVIEW POSTHOOK: Input: vt@v POSTHOOK: Output: vt@v -PREHOOK: query: -- updating to fix view with invalid definition -create table srcpart_temp like srcpart +PREHOOK: query: create table srcpart_temp like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_temp -POSTHOOK: query: -- updating to fix view with invalid definition -create table srcpart_temp like srcpart +POSTHOOK: query: create table srcpart_temp like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_temp diff --git a/ql/src/test/results/clientpositive/create_udaf.q.out b/ql/src/test/results/clientpositive/create_udaf.q.out index 3aaf5a0..6959b47 100644 --- a/ql/src/test/results/clientpositive/create_udaf.q.out +++ b/ql/src/test/results/clientpositive/create_udaf.q.out @@ -42,13 +42,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### 7 -PREHOOK: query: -- cover all the other value types: -SELECT test_max(CAST(length(src.value) AS SMALLINT)) FROM src +PREHOOK: query: SELECT test_max(CAST(length(src.value) AS SMALLINT)) FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- cover all the other value types: -SELECT test_max(CAST(length(src.value) AS SMALLINT)) FROM src +POSTHOOK: query: SELECT 
test_max(CAST(length(src.value) AS SMALLINT)) FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out index 26b15e2..a7f3229 100644 --- a/ql/src/test/results/clientpositive/create_view.q.out +++ b/ql/src/test/results/clientpositive/create_view.q.out @@ -151,12 +151,10 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@view3 #### A masked pattern was here #### VAL_86 -PREHOOK: query: -- test EXPLAIN output for CREATE VIEW -EXPLAIN +PREHOOK: query: EXPLAIN CREATE VIEW view0(valoo) AS SELECT upper(value) FROM src WHERE key=86 PREHOOK: type: CREATEVIEW -POSTHOOK: query: -- test EXPLAIN output for CREATE VIEW -EXPLAIN +POSTHOOK: query: EXPLAIN CREATE VIEW view0(valoo) AS SELECT upper(value) FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW STAGE DEPENDENCIES: @@ -173,12 +171,10 @@ STAGE PLANS: original text: SELECT upper(value) FROM src WHERE key=86 rewrite enabled: false -PREHOOK: query: -- make sure EXPLAIN works with a query which references a view -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * from view2 where key=18 PREHOOK: type: QUERY -POSTHOOK: query: -- make sure EXPLAIN works with a query which references a view -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * from view2 where key=18 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -427,14 +423,10 @@ POSTHOOK: query: CREATE TABLE table1 (key int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@table1 -PREHOOK: query: -- use DESCRIBE EXTENDED on a base table and an external table as points --- of comparison for view descriptions -DESCRIBE EXTENDED table1 +PREHOOK: query: DESCRIBE EXTENDED table1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@table1 -POSTHOOK: query: -- use DESCRIBE EXTENDED on a base table and an external table as points --- of comparison for view descriptions -DESCRIBE EXTENDED table1 +POSTHOOK: 
query: DESCRIBE EXTENDED table1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@table1 key int @@ -450,14 +442,10 @@ key string default value string default #### A masked pattern was here #### -PREHOOK: query: -- use DESCRIBE EXTENDED on a base table as a point of comparison for --- view descriptions -DESCRIBE EXTENDED table1 +PREHOOK: query: DESCRIBE EXTENDED table1 PREHOOK: type: DESCTABLE PREHOOK: Input: default@table1 -POSTHOOK: query: -- use DESCRIBE EXTENDED on a base table as a point of comparison for --- view descriptions -DESCRIBE EXTENDED table1 +POSTHOOK: query: DESCRIBE EXTENDED table1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@table1 key int @@ -588,17 +576,13 @@ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@view5 key1 int key2 int -PREHOOK: query: -- verify that column name and comment in DDL portion --- overrides column alias in SELECT -CREATE VIEW view6(valoo COMMENT 'I cannot spell') AS +PREHOOK: query: CREATE VIEW view6(valoo COMMENT 'I cannot spell') AS SELECT upper(value) as blarg FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view6 -POSTHOOK: query: -- verify that column name and comment in DDL portion --- overrides column alias in SELECT -CREATE VIEW view6(valoo COMMENT 'I cannot spell') AS +POSTHOOK: query: CREATE VIEW view6(valoo COMMENT 'I cannot spell') AS SELECT upper(value) as blarg FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src @@ -611,8 +595,7 @@ POSTHOOK: query: DESCRIBE view6 POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@view6 valoo string I cannot spell -PREHOOK: query: -- verify that ORDER BY and LIMIT are both supported in view def -CREATE VIEW view7 AS +PREHOOK: query: CREATE VIEW view7 AS SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key, value @@ -621,8 +604,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view7 
-POSTHOOK: query: -- verify that ORDER BY and LIMIT are both supported in view def -CREATE VIEW view7 AS +POSTHOOK: query: CREATE VIEW view7 AS SELECT * FROM src WHERE key > 80 AND key < 100 ORDER BY key, value @@ -651,18 +633,12 @@ POSTHOOK: Input: default@view7 87 val_87 90 val_90 90 val_90 -PREHOOK: query: -- top-level ORDER BY should override the one inside the view --- (however, the inside ORDER BY should still influence the evaluation --- of the limit) -SELECT * FROM view7 ORDER BY key DESC, value +PREHOOK: query: SELECT * FROM view7 ORDER BY key DESC, value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@view7 #### A masked pattern was here #### -POSTHOOK: query: -- top-level ORDER BY should override the one inside the view --- (however, the inside ORDER BY should still influence the evaluation --- of the limit) -SELECT * FROM view7 ORDER BY key DESC, value +POSTHOOK: query: SELECT * FROM view7 ORDER BY key DESC, value POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@view7 @@ -677,14 +653,12 @@ POSTHOOK: Input: default@view7 83 val_83 83 val_83 82 val_82 -PREHOOK: query: -- top-level LIMIT should override if lower -SELECT * FROM view7 LIMIT 5 +PREHOOK: query: SELECT * FROM view7 LIMIT 5 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@view7 #### A masked pattern was here #### -POSTHOOK: query: -- top-level LIMIT should override if lower -SELECT * FROM view7 LIMIT 5 +POSTHOOK: query: SELECT * FROM view7 LIMIT 5 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@view7 @@ -694,14 +668,12 @@ POSTHOOK: Input: default@view7 83 val_83 84 val_84 84 val_84 -PREHOOK: query: -- but not if higher -SELECT * FROM view7 LIMIT 20 +PREHOOK: query: SELECT * FROM view7 LIMIT 20 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@view7 #### A masked pattern was here #### -POSTHOOK: query: -- but not if higher -SELECT * FROM view7 LIMIT 20 +POSTHOOK: query: 
SELECT * FROM view7 LIMIT 20 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@view7 @@ -716,13 +688,11 @@ POSTHOOK: Input: default@view7 87 val_87 90 val_90 90 val_90 -PREHOOK: query: -- test usage of a function within a view -CREATE TEMPORARY FUNCTION test_translate AS +PREHOOK: query: CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: test_translate -POSTHOOK: query: -- test usage of a function within a view -CREATE TEMPORARY FUNCTION test_translate AS +POSTHOOK: query: CREATE TEMPORARY FUNCTION test_translate AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDFTestTranslate' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: test_translate @@ -795,26 +765,22 @@ POSTHOOK: Input: default@table1 POSTHOOK: Input: default@view8 #### A masked pattern was here #### bbc -PREHOOK: query: -- test usage of a UDAF within a view -CREATE TEMPORARY FUNCTION test_max AS +PREHOOK: query: CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: test_max -POSTHOOK: query: -- test usage of a UDAF within a view -CREATE TEMPORARY FUNCTION test_max AS +POSTHOOK: query: CREATE TEMPORARY FUNCTION test_max AS 'org.apache.hadoop.hive.ql.udf.UDAFTestMax' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: test_max -PREHOOK: query: -- disable map-side aggregation -CREATE VIEW view9(m) AS +PREHOOK: query: CREATE VIEW view9(m) AS SELECT test_max(length(value)) FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view9 -POSTHOOK: query: -- disable map-side aggregation -CREATE VIEW view9(m) AS +POSTHOOK: query: CREATE VIEW view9(m) AS SELECT test_max(length(value)) FROM src POSTHOOK: type: CREATEVIEW @@ -884,16 +850,14 @@ POSTHOOK: query: DROP VIEW view9 POSTHOOK: type: DROPVIEW POSTHOOK: Input: default@view9 POSTHOOK: Output: 
default@view9 -PREHOOK: query: -- enable map-side aggregation -CREATE VIEW view9(m) AS +PREHOOK: query: CREATE VIEW view9(m) AS SELECT test_max(length(value)) FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view9 -POSTHOOK: query: -- enable map-side aggregation -CREATE VIEW view9(m) AS +POSTHOOK: query: CREATE VIEW view9(m) AS SELECT test_max(length(value)) FROM src POSTHOOK: type: CREATEVIEW @@ -955,15 +919,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@view9 #### A masked pattern was here #### 7 -PREHOOK: query: -- test usage of a subselect within a view -CREATE VIEW view10 AS +PREHOOK: query: CREATE VIEW view10 AS SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view10 -POSTHOOK: query: -- test usage of a subselect within a view -CREATE VIEW view10 AS +POSTHOOK: query: CREATE VIEW view10 AS SELECT slurp.* FROM (SELECT * FROM src WHERE key=86) slurp POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src @@ -1022,13 +984,11 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@view10 #### A masked pattern was here #### 86 val_86 -PREHOOK: query: -- test usage of a UDTF within a view -CREATE TEMPORARY FUNCTION test_explode AS +PREHOOK: query: CREATE TEMPORARY FUNCTION test_explode AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDTFExplode' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: test_explode -POSTHOOK: query: -- test usage of a UDTF within a view -CREATE TEMPORARY FUNCTION test_explode AS +POSTHOOK: query: CREATE TEMPORARY FUNCTION test_explode AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDTFExplode' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: test_explode @@ -1103,15 +1063,13 @@ POSTHOOK: Input: default@view11 1 2 3 -PREHOOK: query: -- test usage of LATERAL within a view -CREATE VIEW view12 AS +PREHOOK: query: CREATE VIEW view12 AS 
SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view12 -POSTHOOK: query: -- test usage of LATERAL within a view -CREATE VIEW view12 AS +POSTHOOK: query: CREATE VIEW view12 AS SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src @@ -1174,31 +1132,27 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@view12 #### A masked pattern was here #### 0 val_0 1 -PREHOOK: query: -- test usage of LATERAL with a view as the LHS -SELECT * FROM view2 LATERAL VIEW explode(array(1,2,3)) myTable AS myCol +PREHOOK: query: SELECT * FROM view2 LATERAL VIEW explode(array(1,2,3)) myTable AS myCol ORDER BY key ASC, myCol ASC LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@view2 #### A masked pattern was here #### -POSTHOOK: query: -- test usage of LATERAL with a view as the LHS -SELECT * FROM view2 LATERAL VIEW explode(array(1,2,3)) myTable AS myCol +POSTHOOK: query: SELECT * FROM view2 LATERAL VIEW explode(array(1,2,3)) myTable AS myCol ORDER BY key ASC, myCol ASC LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@view2 #### A masked pattern was here #### 0 val_0 1 -PREHOOK: query: -- test usage of TABLESAMPLE within a view -CREATE VIEW view13 AS +PREHOOK: query: CREATE VIEW view13 AS SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcbucket PREHOOK: Output: database:default PREHOOK: Output: default@view13 -POSTHOOK: query: -- test usage of TABLESAMPLE within a view -CREATE VIEW view13 AS +POSTHOOK: query: CREATE VIEW view13 AS SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 ON key) s POSTHOOK: type: CREATEVIEW @@ -1273,8 +1227,7 @@ POSTHOOK: Input: default@view13 10 10 15 -PREHOOK: query: -- test usage of JOIN+UNION+AGG all within 
same view -CREATE VIEW view14 AS +PREHOOK: query: CREATE VIEW view14 AS SELECT unionsrc1.key as k1, unionsrc1.value as v1, unionsrc2.key as k2, unionsrc2.value as v2 FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 @@ -1289,8 +1242,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view14 -POSTHOOK: query: -- test usage of JOIN+UNION+AGG all within same view -CREATE VIEW view14 AS +POSTHOOK: query: CREATE VIEW view14 AS SELECT unionsrc1.key as k1, unionsrc1.value as v1, unionsrc2.key as k2, unionsrc2.value as v2 FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 @@ -1422,8 +1374,7 @@ POSTHOOK: Input: default@view14 8 val_8 8 val_8 9 val_9 9 val_9 tst1 500 tst1 500 -PREHOOK: query: -- test usage of GROUP BY within view -CREATE VIEW view15 AS +PREHOOK: query: CREATE VIEW view15 AS SELECT key,COUNT(value) AS value_count FROM src GROUP BY key @@ -1431,8 +1382,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view15 -POSTHOOK: query: -- test usage of GROUP BY within view -CREATE VIEW view15 AS +POSTHOOK: query: CREATE VIEW view15 AS SELECT key,COUNT(value) AS value_count FROM src GROUP BY key @@ -1514,16 +1464,14 @@ POSTHOOK: Input: default@view15 406 4 468 4 489 4 -PREHOOK: query: -- test usage of DISTINCT within view -CREATE VIEW view16 AS +PREHOOK: query: CREATE VIEW view16 AS SELECT DISTINCT value FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@view16 -POSTHOOK: query: -- test usage of DISTINCT within view -CREATE VIEW view16 AS +POSTHOOK: query: CREATE VIEW view16 AS SELECT DISTINCT value FROM src POSTHOOK: type: CREATEVIEW @@ -1598,13 +1546,11 @@ val_11 val_111 val_113 val_114 -PREHOOK: query: -- HIVE-2133: DROP TABLE IF EXISTS should ignore a matching view name -DROP TABLE IF EXISTS view16 +PREHOOK: query: DROP 
TABLE IF EXISTS view16 PREHOOK: type: DROPTABLE PREHOOK: Input: default@view16 PREHOOK: Output: default@view16 -POSTHOOK: query: -- HIVE-2133: DROP TABLE IF EXISTS should ignore a matching view name -DROP TABLE IF EXISTS view16 +POSTHOOK: query: DROP TABLE IF EXISTS view16 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@view16 POSTHOOK: Output: default@view16 @@ -1615,13 +1561,11 @@ POSTHOOK: query: DESCRIBE view16 POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@view16 value string -PREHOOK: query: -- Likewise, DROP VIEW IF EXISTS should ignore a matching table name -DROP VIEW IF EXISTS table1 +PREHOOK: query: DROP VIEW IF EXISTS table1 PREHOOK: type: DROPVIEW PREHOOK: Input: default@table1 PREHOOK: Output: default@table1 -POSTHOOK: query: -- Likewise, DROP VIEW IF EXISTS should ignore a matching table name -DROP VIEW IF EXISTS table1 +POSTHOOK: query: DROP VIEW IF EXISTS table1 POSTHOOK: type: DROPVIEW POSTHOOK: Input: default@table1 POSTHOOK: Output: default@table1 @@ -1633,19 +1577,11 @@ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@table1 key int value string -PREHOOK: query: -- this should work since currently we don't track view->table --- dependencies for implementing RESTRICT - - -DROP VIEW view1 +PREHOOK: query: DROP VIEW view1 PREHOOK: type: DROPVIEW PREHOOK: Input: default@view1 PREHOOK: Output: default@view1 -POSTHOOK: query: -- this should work since currently we don't track view->table --- dependencies for implementing RESTRICT - - -DROP VIEW view1 +POSTHOOK: query: DROP VIEW view1 POSTHOOK: type: DROPVIEW POSTHOOK: Input: default@view1 POSTHOOK: Output: default@view1 diff --git a/ql/src/test/results/clientpositive/create_view_partitioned.q.out b/ql/src/test/results/clientpositive/create_view_partitioned.q.out index 1be3556..85667b6 100644 --- a/ql/src/test/results/clientpositive/create_view_partitioned.q.out +++ b/ql/src/test/results/clientpositive/create_view_partitioned.q.out @@ -10,9 +10,7 @@ PREHOOK: query: DROP VIEW vp3 PREHOOK: type: 
DROPVIEW POSTHOOK: query: DROP VIEW vp3 POSTHOOK: type: DROPVIEW -PREHOOK: query: -- test partitioned view definition --- (underlying table is not actually partitioned) -CREATE VIEW vp1 +PREHOOK: query: CREATE VIEW vp1 PARTITIONED ON (value) AS SELECT key, value @@ -22,9 +20,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@vp1 -POSTHOOK: query: -- test partitioned view definition --- (underlying table is not actually partitioned) -CREATE VIEW vp1 +POSTHOOK: query: CREATE VIEW vp1 PARTITIONED ON (value) AS SELECT key, value @@ -140,15 +136,13 @@ POSTHOOK: Input: default@vp1 POSTHOOK: Output: default@vp1 POSTHOOK: Output: default@vp1@value=val_86 POSTHOOK: Output: default@vp1@value=val_xyz -PREHOOK: query: -- should work since we use IF NOT EXISTS -ALTER VIEW vp1 +PREHOOK: query: ALTER VIEW vp1 ADD IF NOT EXISTS PARTITION (value='val_xyz') PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Input: default@src PREHOOK: Input: default@vp1 PREHOOK: Output: default@vp1 -POSTHOOK: query: -- should work since we use IF NOT EXISTS -ALTER VIEW vp1 +POSTHOOK: query: ALTER VIEW vp1 ADD IF NOT EXISTS PARTITION (value='val_xyz') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Input: default@src @@ -205,13 +199,11 @@ DROP PARTITION (value='val_xyz') POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@vp1 POSTHOOK: Output: default@vp1@value=val_xyz -PREHOOK: query: -- should work since we use IF EXISTS -ALTER VIEW vp1 +PREHOOK: query: ALTER VIEW vp1 DROP IF EXISTS PARTITION (value='val_xyz') PREHOOK: type: ALTERTABLE_DROPPARTS PREHOOK: Input: default@vp1 -POSTHOOK: query: -- should work since we use IF EXISTS -ALTER VIEW vp1 +POSTHOOK: query: ALTER VIEW vp1 DROP IF EXISTS PARTITION (value='val_xyz') POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@vp1 @@ -222,37 +214,25 @@ POSTHOOK: query: SHOW PARTITIONS vp1 POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@vp1 value=val_86 -PREHOOK: query: -- 
Even though no partition predicate is specified in the next query, --- the WHERE clause inside of the view should satisfy strict mode. --- In other words, strict only applies to underlying tables --- (regardless of whether or not the view is partitioned). -SELECT * FROM vp1 +PREHOOK: query: SELECT * FROM vp1 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@vp1 #### A masked pattern was here #### -POSTHOOK: query: -- Even though no partition predicate is specified in the next query, --- the WHERE clause inside of the view should satisfy strict mode. --- In other words, strict only applies to underlying tables --- (regardless of whether or not the view is partitioned). -SELECT * FROM vp1 +POSTHOOK: query: SELECT * FROM vp1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@vp1 #### A masked pattern was here #### 86 val_86 -PREHOOK: query: -- test a partitioned view on top of an underlying partitioned table, --- but with only a suffix of the partitioning columns -CREATE VIEW vp2 +PREHOOK: query: CREATE VIEW vp2 PARTITIONED ON (hr) AS SELECT * FROM srcpart WHERE key < 10 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:default PREHOOK: Output: default@vp2 -POSTHOOK: query: -- test a partitioned view on top of an underlying partitioned table, --- but with only a suffix of the partitioning columns -CREATE VIEW vp2 +POSTHOOK: query: CREATE VIEW vp2 PARTITIONED ON (hr) AS SELECT * FROM srcpart WHERE key < 10 POSTHOOK: type: CREATEVIEW @@ -351,9 +331,7 @@ POSTHOOK: Input: default@vp2 8 9 9 -PREHOOK: query: -- test a partitioned view where the PARTITIONED ON clause references --- an imposed column name -CREATE VIEW vp3(k,v) +PREHOOK: query: CREATE VIEW vp3(k,v) PARTITIONED ON (v) AS SELECT key, value @@ -363,9 +341,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@vp3 -POSTHOOK: query: -- test a partitioned view where the 
PARTITIONED ON clause references --- an imposed column name -CREATE VIEW vp3(k,v) +POSTHOOK: query: CREATE VIEW vp3(k,v) PARTITIONED ON (v) AS SELECT key, value diff --git a/ql/src/test/results/clientpositive/create_view_translate.q.out b/ql/src/test/results/clientpositive/create_view_translate.q.out index cb7402c..4b3e196 100644 --- a/ql/src/test/results/clientpositive/create_view_translate.q.out +++ b/ql/src/test/results/clientpositive/create_view_translate.q.out @@ -113,15 +113,11 @@ POSTHOOK: query: drop view w POSTHOOK: type: DROPVIEW POSTHOOK: Input: default@w POSTHOOK: Output: default@w -PREHOOK: query: -- HIVE-4116 Can't use views using map datatype. - -CREATE TABLE items (id INT, name STRING, info MAP) +PREHOOK: query: CREATE TABLE items (id INT, name STRING, info MAP) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@items -POSTHOOK: query: -- HIVE-4116 Can't use views using map datatype. - -CREATE TABLE items (id INT, name STRING, info MAP) +POSTHOOK: query: CREATE TABLE items (id INT, name STRING, info MAP) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@items diff --git a/ql/src/test/results/clientpositive/cross_join.q.out b/ql/src/test/results/clientpositive/cross_join.q.out index d5cbaca..b4dc9d6 100644 --- a/ql/src/test/results/clientpositive/cross_join.q.out +++ b/ql/src/test/results/clientpositive/cross_join.q.out @@ -1,9 +1,7 @@ Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- current -explain select src.key from src join src src2 +PREHOOK: query: explain select src.key from src join src src2 PREHOOK: type: QUERY -POSTHOOK: query: -- current -explain select src.key from src join src src2 +POSTHOOK: query: explain select src.key from src join src src2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -56,11 +54,9 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[6][tables = 
[$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- ansi cross join -explain select src.key from src cross join src src2 +PREHOOK: query: explain select src.key from src cross join src src2 PREHOOK: type: QUERY -POSTHOOK: query: -- ansi cross join -explain select src.key from src cross join src src2 +POSTHOOK: query: explain select src.key from src cross join src src2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -112,11 +108,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- appending condition is allowed -explain select src.key from src cross join src src2 on src.key=src2.key +PREHOOK: query: explain select src.key from src cross join src src2 on src.key=src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- appending condition is allowed -explain select src.key from src cross join src src2 on src.key=src2.key +POSTHOOK: query: explain select src.key from src cross join src src2 on src.key=src2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/cross_join_merge.q.out b/ql/src/test/results/clientpositive/cross_join_merge.q.out index 52e1e13..a442971 100644 --- a/ql/src/test/results/clientpositive/cross_join_merge.q.out +++ b/ql/src/test/results/clientpositive/cross_join_merge.q.out @@ -339,12 +339,10 @@ STAGE PLANS: Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- no merge -explain +PREHOOK: query: explain select src1.key from src src1 left outer join src src2 join src src3 PREHOOK: type: QUERY -POSTHOOK: query: -- no merge -explain +POSTHOOK: query: explain select src1.key from src src1 left outer join src src2 join src src3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/cross_product_check_1.q.out 
b/ql/src/test/results/clientpositive/cross_product_check_1.q.out index ae2e374..4fd113d 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_1.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@A -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cross_product_check_2.q.out index e441cb3..f22f47b 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_2.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@A -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out index a9c3136b..c1b0838 100644 --- a/ql/src/test/results/clientpositive/ctas.q.out +++ b/ql/src/test/results/clientpositive/ctas.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) --- SORT_QUERY_RESULTS - -create table nzhang_Tmp(a int, b string) +PREHOOK: query: create table nzhang_Tmp(a int, b string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_Tmp -POSTHOOK: query: -- 
EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) --- SORT_QUERY_RESULTS - -create table nzhang_Tmp(a int, b string) +POSTHOOK: query: create table nzhang_Tmp(a int, b string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_Tmp diff --git a/ql/src/test/results/clientpositive/ctas_char.q.out b/ql/src/test/results/clientpositive/ctas_char.q.out index c5dbe56..d5f77cd 100644 --- a/ql/src/test/results/clientpositive/ctas_char.q.out +++ b/ql/src/test/results/clientpositive/ctas_char.q.out @@ -30,28 +30,24 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@ctas_char_1 POSTHOOK: Lineage: ctas_char_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: ctas_char_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- create table as with char column -create table ctas_char_2 as select key, value from ctas_char_1 +PREHOOK: query: create table ctas_char_2 as select key, value from ctas_char_1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@ctas_char_1 PREHOOK: Output: database:default PREHOOK: Output: default@ctas_char_2 -POSTHOOK: query: -- create table as with char column -create table ctas_char_2 as select key, value from ctas_char_1 +POSTHOOK: query: create table ctas_char_2 as select key, value from ctas_char_1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@ctas_char_1 POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_char_2 POSTHOOK: Lineage: ctas_char_2.key SIMPLE [(ctas_char_1)ctas_char_1.FieldSchema(name:key, type:char(10), comment:null), ] POSTHOOK: Lineage: ctas_char_2.value SIMPLE [(ctas_char_1)ctas_char_1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- view with char column -create view ctas_char_3 as select key, value from ctas_char_2 +PREHOOK: query: create view ctas_char_3 as select key, value from ctas_char_2 PREHOOK: type: CREATEVIEW PREHOOK: Input: 
default@ctas_char_2 PREHOOK: Output: database:default PREHOOK: Output: default@ctas_char_3 -POSTHOOK: query: -- view with char column -create view ctas_char_3 as select key, value from ctas_char_2 +POSTHOOK: query: create view ctas_char_3 as select key, value from ctas_char_2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@ctas_char_2 POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out index 2622676..364dfb1 100644 --- a/ql/src/test/results/clientpositive/ctas_colname.q.out +++ b/ql/src/test/results/clientpositive/ctas_colname.q.out @@ -1,21 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- HIVE-4392, column aliases from expressionRR (GBY, etc.) are not valid name for table - --- group by - - -explain +PREHOOK: query: explain create table summary as select *, key + 1, concat(value, value) from src limit 20 PREHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- HIVE-4392, column aliases from expressionRR (GBY, etc.) 
are not valid name for table - --- group by - - -explain +POSTHOOK: query: explain create table summary as select *, key + 1, concat(value, value) from src limit 20 POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: @@ -157,12 +143,10 @@ POSTHOOK: Input: default@summary 66 val_66 67.0 val_66val_66 86 val_86 87.0 val_86val_86 98 val_98 99.0 val_98val_98 -PREHOOK: query: -- window functions -explain +PREHOOK: query: explain create table x4 as select *, rank() over(partition by key order by value) as rr from src1 PREHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: query: -- window functions -explain +POSTHOOK: query: explain create table x4 as select *, rank() over(partition by key order by value) as rr from src1 POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: @@ -508,12 +492,10 @@ POSTHOOK: Input: default@x5 118 val_118 118 118 val_118 NULL 119 val_119 119 -PREHOOK: query: -- sub queries -explain +PREHOOK: query: explain create table x6 as select * from (select *, key + 1 from src1) a PREHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: query: -- sub queries -explain +POSTHOOK: query: explain create table x6 as select * from (select *, key + 1 from src1) a POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/ctas_date.q.out b/ql/src/test/results/clientpositive/ctas_date.q.out index a441f8d..60fc355 100644 --- a/ql/src/test/results/clientpositive/ctas_date.q.out +++ b/ql/src/test/results/clientpositive/ctas_date.q.out @@ -35,14 +35,12 @@ POSTHOOK: Output: default@ctas_date_1 POSTHOOK: Lineage: ctas_date_1.dd SIMPLE [] POSTHOOK: Lineage: ctas_date_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: ctas_date_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- create table as with date column -create table ctas_date_2 as select key, value, dd, date '1980-12-12' from ctas_date_1 +PREHOOK: query: create table 
ctas_date_2 as select key, value, dd, date '1980-12-12' from ctas_date_1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@ctas_date_1 PREHOOK: Output: database:default PREHOOK: Output: default@ctas_date_2 -POSTHOOK: query: -- create table as with date column -create table ctas_date_2 as select key, value, dd, date '1980-12-12' from ctas_date_1 +POSTHOOK: query: create table ctas_date_2 as select key, value, dd, date '1980-12-12' from ctas_date_1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@ctas_date_1 POSTHOOK: Output: database:default @@ -51,14 +49,12 @@ POSTHOOK: Lineage: ctas_date_2.c3 SIMPLE [] POSTHOOK: Lineage: ctas_date_2.dd SIMPLE [(ctas_date_1)ctas_date_1.FieldSchema(name:dd, type:date, comment:null), ] POSTHOOK: Lineage: ctas_date_2.key SIMPLE [(ctas_date_1)ctas_date_1.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: ctas_date_2.value SIMPLE [(ctas_date_1)ctas_date_1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- view with date column -create view ctas_date_3 as select * from ctas_date_2 where dd > date '2000-01-01' +PREHOOK: query: create view ctas_date_3 as select * from ctas_date_2 where dd > date '2000-01-01' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@ctas_date_2 PREHOOK: Output: database:default PREHOOK: Output: default@ctas_date_3 -POSTHOOK: query: -- view with date column -create view ctas_date_3 as select * from ctas_date_2 where dd > date '2000-01-01' +POSTHOOK: query: create view ctas_date_3 as select * from ctas_date_2 where dd > date '2000-01-01' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@ctas_date_2 POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out index 39d8bcf..ed6d2bc 100644 --- a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out +++ 
b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out @@ -1,17 +1,9 @@ -PREHOOK: query: -- Tests that CTAS queries in non-default databases use the location of the database --- not the hive.metastore.warehouse.dir for intermediate files (FileSinkOperator output). --- If hive.metastore.warehouse.dir were used this would fail because the scheme is invalid. - -CREATE DATABASE db1 +PREHOOK: query: CREATE DATABASE db1 #### A masked pattern was here #### PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:db1 #### A masked pattern was here #### -POSTHOOK: query: -- Tests that CTAS queries in non-default databases use the location of the database --- not the hive.metastore.warehouse.dir for intermediate files (FileSinkOperator output). --- If hive.metastore.warehouse.dir were used this would fail because the scheme is invalid. - -CREATE DATABASE db1 +POSTHOOK: query: CREATE DATABASE db1 #### A masked pattern was here #### POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:db1 diff --git a/ql/src/test/results/clientpositive/ctas_varchar.q.out b/ql/src/test/results/clientpositive/ctas_varchar.q.out index 9715e09..3ed5e86 100644 --- a/ql/src/test/results/clientpositive/ctas_varchar.q.out +++ b/ql/src/test/results/clientpositive/ctas_varchar.q.out @@ -30,28 +30,24 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@ctas_varchar_1 POSTHOOK: Lineage: ctas_varchar_1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: ctas_varchar_1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- create table as with varchar column -create table ctas_varchar_2 as select key, value from ctas_varchar_1 +PREHOOK: query: create table ctas_varchar_2 as select key, value from ctas_varchar_1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@ctas_varchar_1 PREHOOK: Output: database:default PREHOOK: Output: default@ctas_varchar_2 -POSTHOOK: query: -- create table 
as with varchar column -create table ctas_varchar_2 as select key, value from ctas_varchar_1 +POSTHOOK: query: create table ctas_varchar_2 as select key, value from ctas_varchar_1 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@ctas_varchar_1 POSTHOOK: Output: database:default POSTHOOK: Output: default@ctas_varchar_2 POSTHOOK: Lineage: ctas_varchar_2.key SIMPLE [(ctas_varchar_1)ctas_varchar_1.FieldSchema(name:key, type:varchar(10), comment:null), ] POSTHOOK: Lineage: ctas_varchar_2.value SIMPLE [(ctas_varchar_1)ctas_varchar_1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- view with varchar column -create view ctas_varchar_3 as select key, value from ctas_varchar_2 +PREHOOK: query: create view ctas_varchar_3 as select key, value from ctas_varchar_2 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@ctas_varchar_2 PREHOOK: Output: database:default PREHOOK: Output: default@ctas_varchar_3 -POSTHOOK: query: -- view with varchar column -create view ctas_varchar_3 as select key, value from ctas_varchar_2 +POSTHOOK: query: create view ctas_varchar_3 as select key, value from ctas_varchar_2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@ctas_varchar_2 POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/cte_1.q.out b/ql/src/test/results/clientpositive/cte_1.q.out index b624d9a..d13ad42 100644 --- a/ql/src/test/results/clientpositive/cte_1.q.out +++ b/ql/src/test/results/clientpositive/cte_1.q.out @@ -55,13 +55,11 @@ POSTHOOK: Input: default@src 5 5 5 -PREHOOK: query: -- in subquery -explain +PREHOOK: query: explain with q1 as ( select key from src where key = '5') select * from (select key from q1) a PREHOOK: type: QUERY -POSTHOOK: query: -- in subquery -explain +POSTHOOK: query: explain with q1 as ( select key from src where key = '5') select * from (select key from q1) a POSTHOOK: type: QUERY @@ -110,14 +108,12 @@ POSTHOOK: Input: default@src 5 5 5 -PREHOOK: query: -- chaining -explain +PREHOOK: 
query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') select * from (select key from q1) a PREHOOK: type: QUERY -POSTHOOK: query: -- chaining -explain +POSTHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') select * from (select key from q1) a @@ -12951,16 +12947,14 @@ NULL y605nF0K3mMoM75j NULL 1073418988 s1Tij71BKtw43u -11535.0 1073680599 NULL NULL 1073680599 pWxC5d20ub50yq8EJ8qpQ4h NULL -PREHOOK: query: --standard rollup syntax -with q1 as (select * from alltypesorc) +PREHOOK: query: with q1 as (select * from alltypesorc) from q1 select cint, cstring1, avg(csmallint) group by rollup (cint, cstring1) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: --standard rollup syntax -with q1 as (select * from alltypesorc) +POSTHOOK: query: with q1 as (select * from alltypesorc) from q1 select cint, cstring1, avg(csmallint) group by rollup (cint, cstring1) diff --git a/ql/src/test/results/clientpositive/cte_2.q.out b/ql/src/test/results/clientpositive/cte_2.q.out index f68d9df..1506217 100644 --- a/ql/src/test/results/clientpositive/cte_2.q.out +++ b/ql/src/test/results/clientpositive/cte_2.q.out @@ -1,12 +1,10 @@ -PREHOOK: query: -- union test -with q1 as (select * from src where key= '5'), +PREHOOK: query: with q1 as (select * from src where key= '5'), q2 as (select * from src s2 where key = '4') select * from q1 union all select * from q2 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- union test -with q1 as (select * from src where key= '5'), +POSTHOOK: query: with q1 as (select * from src where key= '5'), q2 as (select * from src s2 where key = '4') select * from q1 union all select * from q2 POSTHOOK: type: QUERY @@ -16,13 +14,11 @@ POSTHOOK: Input: default@src 5 val_5 5 val_5 5 val_5 -PREHOOK: query: -- insert test -create table 
s1 like src +PREHOOK: query: create table s1 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@s1 -POSTHOOK: query: -- insert test -create table s1 like src +POSTHOOK: query: create table s1 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@s1 @@ -61,15 +57,13 @@ POSTHOOK: query: drop table s1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@s1 POSTHOOK: Output: default@s1 -PREHOOK: query: -- from style -with q1 as (select * from src where key= '5') +PREHOOK: query: with q1 as (select * from src where key= '5') from q1 select * PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- from style -with q1 as (select * from src where key= '5') +POSTHOOK: query: with q1 as (select * from src where key= '5') from q1 select * POSTHOOK: type: QUERY @@ -78,16 +72,14 @@ POSTHOOK: Input: default@src 5 val_5 5 val_5 5 val_5 -PREHOOK: query: -- ctas -create table s2 as +PREHOOK: query: create table s2 as with q1 as ( select key from src where key = '4') select * from q1 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@s2 -POSTHOOK: query: -- ctas -create table s2 as +POSTHOOK: query: create table s2 as with q1 as ( select key from src where key = '4') select * from q1 POSTHOOK: type: CREATETABLE_AS_SELECT @@ -112,16 +104,14 @@ POSTHOOK: query: drop table s2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@s2 POSTHOOK: Output: default@s2 -PREHOOK: query: -- view test -create view v1 as +PREHOOK: query: create view v1 as with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@v1 -POSTHOOK: query: -- view test -create view v1 as +POSTHOOK: query: create view v1 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: 
type: CREATEVIEW @@ -149,16 +139,14 @@ POSTHOOK: query: drop view v1 POSTHOOK: type: DROPVIEW POSTHOOK: Input: default@v1 POSTHOOK: Output: default@v1 -PREHOOK: query: -- view test, name collision -create view v1 as +PREHOOK: query: create view v1 as with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@v1 -POSTHOOK: query: -- view test, name collision -create view v1 as +POSTHOOK: query: create view v1 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientpositive/cte_3.q.out b/ql/src/test/results/clientpositive/cte_3.q.out index 0fe0865..ae52a95 100644 --- a/ql/src/test/results/clientpositive/cte_3.q.out +++ b/ql/src/test/results/clientpositive/cte_3.q.out @@ -120,13 +120,11 @@ POSTHOOK: Output: default@q1 5 5 5 -PREHOOK: query: -- in subquery -explain +PREHOOK: query: explain with q1 as ( select key from src where key = '5') select * from (select key from q1) a PREHOOK: type: QUERY -POSTHOOK: query: -- in subquery -explain +POSTHOOK: query: explain with q1 as ( select key from src where key = '5') select * from (select key from q1) a POSTHOOK: type: QUERY @@ -240,14 +238,12 @@ POSTHOOK: Output: default@q1 5 5 5 -PREHOOK: query: -- chaining -explain +PREHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') select * from (select key from q1) a PREHOOK: type: QUERY -POSTHOOK: query: -- chaining -explain +POSTHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') select * from (select key from q1) a diff --git a/ql/src/test/results/clientpositive/cte_4.q.out b/ql/src/test/results/clientpositive/cte_4.q.out index 6385abe..de976c3 100644 --- a/ql/src/test/results/clientpositive/cte_4.q.out +++ b/ql/src/test/results/clientpositive/cte_4.q.out 
@@ -1,5 +1,4 @@ -PREHOOK: query: -- union test -with q1 as (select * from src where key= '5'), +PREHOOK: query: with q1 as (select * from src where key= '5'), q2 as (select * from src s2 where key = '4') select * from q1 union all select * from q2 PREHOOK: type: QUERY @@ -10,8 +9,7 @@ PREHOOK: Output: database:default PREHOOK: Output: default@q1 PREHOOK: Output: default@q2 #### A masked pattern was here #### -POSTHOOK: query: -- union test -with q1 as (select * from src where key= '5'), +POSTHOOK: query: with q1 as (select * from src where key= '5'), q2 as (select * from src s2 where key = '4') select * from q1 union all select * from q2 POSTHOOK: type: QUERY @@ -26,13 +24,11 @@ POSTHOOK: Output: default@q2 5 val_5 5 val_5 4 val_4 -PREHOOK: query: -- insert test -create table s1 like src +PREHOOK: query: create table s1 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@s1 -POSTHOOK: query: -- insert test -create table s1 like src +POSTHOOK: query: create table s1 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@s1 @@ -79,8 +75,7 @@ POSTHOOK: query: drop table s1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@s1 POSTHOOK: Output: default@s1 -PREHOOK: query: -- from style -with q1 as (select * from src where key= '5') +PREHOOK: query: with q1 as (select * from src where key= '5') from q1 select * PREHOOK: type: QUERY @@ -89,8 +84,7 @@ PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@q1 #### A masked pattern was here #### -POSTHOOK: query: -- from style -with q1 as (select * from src where key= '5') +POSTHOOK: query: with q1 as (select * from src where key= '5') from q1 select * POSTHOOK: type: QUERY @@ -102,8 +96,7 @@ POSTHOOK: Output: default@q1 5 val_5 5 val_5 5 val_5 -PREHOOK: query: -- ctas -create table s2 as +PREHOOK: query: create table s2 as with q1 as ( select key from src where key = '4') select * from q1 PREHOOK: type: 
CREATETABLE_AS_SELECT @@ -113,8 +106,7 @@ PREHOOK: Output: database:default PREHOOK: Output: default@q1 PREHOOK: Output: default@s2 #### A masked pattern was here #### -POSTHOOK: query: -- ctas -create table s2 as +POSTHOOK: query: create table s2 as with q1 as ( select key from src where key = '4') select * from q1 POSTHOOK: type: CREATETABLE_AS_SELECT @@ -142,16 +134,14 @@ POSTHOOK: query: drop table s2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@s2 POSTHOOK: Output: default@s2 -PREHOOK: query: -- view test -create view v1 as +PREHOOK: query: create view v1 as with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@v1 -POSTHOOK: query: -- view test -create view v1 as +POSTHOOK: query: create view v1 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW @@ -179,16 +169,14 @@ POSTHOOK: query: drop view v1 POSTHOOK: type: DROPVIEW POSTHOOK: Input: default@v1 POSTHOOK: Output: default@v1 -PREHOOK: query: -- view test, name collision -create view v1 as +PREHOOK: query: create view v1 as with q1 as ( select key from src where key = '5') select * from q1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@v1 -POSTHOOK: query: -- view test, name collision -create view v1 as +POSTHOOK: query: create view v1 as with q1 as ( select key from src where key = '5') select * from q1 POSTHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientpositive/cte_6.q.out b/ql/src/test/results/clientpositive/cte_6.q.out index 8cc433b..a313d14 100644 --- a/ql/src/test/results/clientpositive/cte_6.q.out +++ b/ql/src/test/results/clientpositive/cte_6.q.out @@ -38,16 +38,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- chaining - -explain +PREHOOK: query: explain with Q1 as ( select key from q2 where key = '5'), Q2 as ( select key from sRc 
where key = '5') select CPS.key from Q1 CPS PREHOOK: type: QUERY -POSTHOOK: query: -- chaining - -explain +POSTHOOK: query: explain with Q1 as ( select key from q2 where key = '5'), Q2 as ( select key from sRc where key = '5') select CPS.key from Q1 CPS diff --git a/ql/src/test/results/clientpositive/custom_input_output_format.q.out b/ql/src/test/results/clientpositive/custom_input_output_format.q.out index 662ed1a..edc972f 100644 --- a/ql/src/test/results/clientpositive/custom_input_output_format.q.out +++ b/ql/src/test/results/clientpositive/custom_input_output_format.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src1_rot13_iof(key STRING, value STRING) +PREHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src1_rot13_iof -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src1_rot13_iof(key STRING, value STRING) +POSTHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat' POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/database_drop.q.out b/ql/src/test/results/clientpositive/database_drop.q.out index 225104f..f37d8f7 100644 --- a/ql/src/test/results/clientpositive/database_drop.q.out +++ b/ql/src/test/results/clientpositive/database_drop.q.out @@ -1,19 +1,7 @@ -PREHOOK: query: -- create database with multiple tables, indexes and views. 
--- Use both partitioned and non-partitioned tables, as well as --- tables and indexes with specific storage locations --- verify the drop the database with cascade works and that the directories --- outside the database's default storage are removed as part of the drop - -CREATE DATABASE db5 +PREHOOK: query: CREATE DATABASE db5 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:db5 -POSTHOOK: query: -- create database with multiple tables, indexes and views. --- Use both partitioned and non-partitioned tables, as well as --- tables and indexes with specific storage locations --- verify the drop the database with cascade works and that the directories --- outside the database's default storage are removed as part of the drop - -CREATE DATABASE db5 +POSTHOOK: query: CREATE DATABASE db5 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:db5 PREHOOK: query: SHOW DATABASES @@ -29,13 +17,11 @@ POSTHOOK: query: USE db5 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:db5 #### A masked pattern was here #### -PREHOOK: query: -- add a table, index and view -CREATE TABLE temp_tbl (id INT, name STRING) +PREHOOK: query: CREATE TABLE temp_tbl (id INT, name STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db5 PREHOOK: Output: db5@temp_tbl -POSTHOOK: query: -- add a table, index and view -CREATE TABLE temp_tbl (id INT, name STRING) +POSTHOOK: query: CREATE TABLE temp_tbl (id INT, name STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@temp_tbl @@ -75,13 +61,11 @@ POSTHOOK: Output: db5@db5__temp_tbl_idx1__ POSTHOOK: Lineage: db5__temp_tbl_idx1__._bucketname SIMPLE [(temp_tbl)temp_tbl.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: db5__temp_tbl_idx1__._offsets EXPRESSION [(temp_tbl)temp_tbl.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: db5__temp_tbl_idx1__.id SIMPLE [(temp_tbl)temp_tbl.FieldSchema(name:id, type:int, comment:null), ] 
-PREHOOK: query: -- add a table, index and view with a different storage location #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:db5 PREHOOK: Output: db5@temp_tbl2 -POSTHOOK: query: -- add a table, index and view with a different storage location #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### @@ -124,13 +108,11 @@ POSTHOOK: Output: db5@db5__temp_tbl2_idx2__ POSTHOOK: Lineage: db5__temp_tbl2_idx2__._bucketname SIMPLE [(temp_tbl2)temp_tbl2.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: db5__temp_tbl2_idx2__._offsets EXPRESSION [(temp_tbl2)temp_tbl2.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: db5__temp_tbl2_idx2__.id SIMPLE [(temp_tbl2)temp_tbl2.FieldSchema(name:id, type:int, comment:null), ] -PREHOOK: query: -- add a partitioned table, index and view -CREATE TABLE part_tab (id INT, name STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE part_tab (id INT, name STRING) PARTITIONED BY (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db5 PREHOOK: Output: db5@part_tab -POSTHOOK: query: -- add a partitioned table, index and view -CREATE TABLE part_tab (id INT, name STRING) PARTITIONED BY (ds string) +POSTHOOK: query: CREATE TABLE part_tab (id INT, name STRING) PARTITIONED BY (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@part_tab @@ -185,15 +167,13 @@ POSTHOOK: Output: db5@db5__part_tab_idx3__@ds=2009-04-09 POSTHOOK: Lineage: db5__part_tab_idx3__ PARTITION(ds=2009-04-09)._bucketname SIMPLE [(part_tab)part_tab.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: db5__part_tab_idx3__ PARTITION(ds=2009-04-09)._offsets EXPRESSION [(part_tab)part_tab.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: db5__part_tab_idx3__ 
PARTITION(ds=2009-04-09).id SIMPLE [(part_tab)part_tab.FieldSchema(name:id, type:int, comment:null), ] -PREHOOK: query: -- add a partitioned table, index and view with a different storage location -CREATE TABLE part_tab2 (id INT, name STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE part_tab2 (id INT, name STRING) PARTITIONED BY (ds string) #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:db5 PREHOOK: Output: db5@part_tab2 -POSTHOOK: query: -- add a partitioned table, index and view with a different storage location -CREATE TABLE part_tab2 (id INT, name STRING) PARTITIONED BY (ds string) +POSTHOOK: query: CREATE TABLE part_tab2 (id INT, name STRING) PARTITIONED BY (ds string) #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### @@ -254,15 +234,13 @@ POSTHOOK: Output: db5@db5__part_tab2_idx4__@ds=2009-04-09 POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2009-04-09)._bucketname SIMPLE [(part_tab2)part_tab2.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2009-04-09)._offsets EXPRESSION [(part_tab2)part_tab2.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: db5__part_tab2_idx4__ PARTITION(ds=2009-04-09).id SIMPLE [(part_tab2)part_tab2.FieldSchema(name:id, type:int, comment:null), ] -PREHOOK: query: -- add a partitioned table, index and view with a different storage location -CREATE TABLE part_tab3 (id INT, name STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE part_tab3 (id INT, name STRING) PARTITIONED BY (ds string) #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:db5 PREHOOK: Output: db5@part_tab3 -POSTHOOK: query: -- add a partitioned table, index and view with a different storage location -CREATE TABLE part_tab3 (id INT, 
name STRING) PARTITIONED BY (ds string) +POSTHOOK: query: CREATE TABLE part_tab3 (id INT, name STRING) PARTITIONED BY (ds string) #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### @@ -332,8 +310,7 @@ POSTHOOK: Output: db5@db5__part_tab3_idx5__@ds=2009-04-09 POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2009-04-09)._bucketname SIMPLE [(part_tab3)part_tab3.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2009-04-09)._offsets EXPRESSION [(part_tab3)part_tab3.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: db5__part_tab3_idx5__ PARTITION(ds=2009-04-09).id SIMPLE [(part_tab3)part_tab3.FieldSchema(name:id, type:int, comment:null), ] -PREHOOK: query: -- add an external table -CREATE EXTERNAL TABLE extab1(id INT, name STRING) ROW FORMAT +PREHOOK: query: CREATE EXTERNAL TABLE extab1(id INT, name STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '' LINES TERMINATED BY '\n' STORED AS TEXTFILE @@ -342,8 +319,7 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:db5 PREHOOK: Output: db5@extab1 -POSTHOOK: query: -- add an external table -CREATE EXTERNAL TABLE extab1(id INT, name STRING) ROW FORMAT +POSTHOOK: query: CREATE EXTERNAL TABLE extab1(id INT, name STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '' LINES TERMINATED BY '\n' STORED AS TEXTFILE @@ -352,13 +328,11 @@ POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@extab1 -PREHOOK: query: -- add a table, create index (give a name for index table) -CREATE TABLE temp_tbl3 (id INT, name STRING) +PREHOOK: query: CREATE TABLE temp_tbl3 (id INT, name STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:db5 PREHOOK: Output: db5@temp_tbl3 -POSTHOOK: query: -- add a table, create index (give a name for index table) -CREATE TABLE temp_tbl3 (id INT, name 
STRING) +POSTHOOK: query: CREATE TABLE temp_tbl3 (id INT, name STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db5 POSTHOOK: Output: db5@temp_tbl3 @@ -388,8 +362,7 @@ POSTHOOK: Output: db5@temp_tbl3_idx_tbl POSTHOOK: Lineage: temp_tbl3_idx_tbl._bucketname SIMPLE [(temp_tbl3)temp_tbl3.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: temp_tbl3_idx_tbl._offsets EXPRESSION [(temp_tbl3)temp_tbl3.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: temp_tbl3_idx_tbl.id SIMPLE [(temp_tbl3)temp_tbl3.FieldSchema(name:id, type:int, comment:null), ] -PREHOOK: query: -- drop the database with cascade -DROP DATABASE db5 CASCADE +PREHOOK: query: DROP DATABASE db5 CASCADE PREHOOK: type: DROPDATABASE PREHOOK: Input: database:db5 PREHOOK: Output: database:db5 @@ -408,8 +381,7 @@ PREHOOK: Output: db5@temp_tbl2_view PREHOOK: Output: db5@temp_tbl3 PREHOOK: Output: db5@temp_tbl3_idx_tbl PREHOOK: Output: db5@temp_tbl_view -POSTHOOK: query: -- drop the database with cascade -DROP DATABASE db5 CASCADE +POSTHOOK: query: DROP DATABASE db5 CASCADE POSTHOOK: type: DROPDATABASE POSTHOOK: Input: database:db5 POSTHOOK: Output: database:db5 diff --git a/ql/src/test/results/clientpositive/date_1.q.out b/ql/src/test/results/clientpositive/date_1.q.out index df9fc47..4ec31b4 100644 --- a/ql/src/test/results/clientpositive/date_1.q.out +++ b/ql/src/test/results/clientpositive/date_1.q.out @@ -97,8 +97,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_1 #### A masked pattern was here #### 2011-01-01 1 -PREHOOK: query: -- Valid casts -select +PREHOOK: query: select cast('2012-01-01' as string), cast(d as string), cast(d as timestamp), @@ -108,8 +107,7 @@ from date_1 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@date_1 #### A masked pattern was here #### -POSTHOOK: query: -- Valid casts -select +POSTHOOK: query: select cast('2012-01-01' as string), cast(d as string), cast(d as timestamp), @@ -120,8 +118,7 
@@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_1 #### A masked pattern was here #### 2012-01-01 2011-01-01 2011-01-01 00:00:00 2011-01-01 2011-01-01 -PREHOOK: query: -- Invalid casts. -select +PREHOOK: query: select cast(d as boolean), cast(d as tinyint), cast(d as smallint), @@ -133,8 +130,7 @@ from date_1 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@date_1 #### A masked pattern was here #### -POSTHOOK: query: -- Invalid casts. -select +POSTHOOK: query: select cast(d as boolean), cast(d as tinyint), cast(d as smallint), @@ -147,8 +143,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_1 #### A masked pattern was here #### NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- These comparisons should all be true -select +PREHOOK: query: select date '2011-01-01' = date '2011-01-01', unix_timestamp(date '2011-01-01') = unix_timestamp(date '2011-01-01'), unix_timestamp(date '2011-01-01') = unix_timestamp(cast(date '2011-01-01' as timestamp)), @@ -163,8 +158,7 @@ from date_1 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@date_1 #### A masked pattern was here #### -POSTHOOK: query: -- These comparisons should all be true -select +POSTHOOK: query: select date '2011-01-01' = date '2011-01-01', unix_timestamp(date '2011-01-01') = unix_timestamp(date '2011-01-01'), unix_timestamp(date '2011-01-01') = unix_timestamp(cast(date '2011-01-01' as timestamp)), diff --git a/ql/src/test/results/clientpositive/date_4.q.out b/ql/src/test/results/clientpositive/date_4.q.out index 02e5c96..e254a56 100644 --- a/ql/src/test/results/clientpositive/date_4.q.out +++ b/ql/src/test/results/clientpositive/date_4.q.out @@ -18,14 +18,12 @@ POSTHOOK: query: alter table date_4 set serde 'org.apache.hadoop.hive.serde2.laz POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: default@date_4 POSTHOOK: Output: default@date_4 -PREHOOK: query: -- Test date literal syntax -insert overwrite table date_4 +PREHOOK: query: insert overwrite table date_4 select date '2011-01-01' 
from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@date_4 -POSTHOOK: query: -- Test date literal syntax -insert overwrite table date_4 +POSTHOOK: query: insert overwrite table date_4 select date '2011-01-01' from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/date_comparison.q.out b/ql/src/test/results/clientpositive/date_comparison.q.out index 73e8560..69a107e 100644 --- a/ql/src/test/results/clientpositive/date_comparison.q.out +++ b/ql/src/test/results/clientpositive/date_comparison.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Comparisons against same value -select cast('2011-05-06' as date) > +PREHOOK: query: select cast('2011-05-06' as date) > cast('2011-05-06' as date) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Comparisons against same value -select cast('2011-05-06' as date) > +POSTHOOK: query: select cast('2011-05-06' as date) > cast('2011-05-06' as date) from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -66,14 +64,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true -PREHOOK: query: -- Now try with differing values -select cast('2011-05-05' as date) > +PREHOOK: query: select cast('2011-05-05' as date) > cast('2011-05-06' as date) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Now try with differing values -select cast('2011-05-05' as date) > +POSTHOOK: query: select cast('2011-05-05' as date) > cast('2011-05-06' as date) from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/date_join1.q.out b/ql/src/test/results/clientpositive/date_join1.q.out index d551f5b..1aa9042 100644 --- a/ql/src/test/results/clientpositive/date_join1.q.out +++ 
b/ql/src/test/results/clientpositive/date_join1.q.out @@ -2,9 +2,7 @@ PREHOOK: query: drop table date_join1 PREHOOK: type: DROPTABLE POSTHOOK: query: drop table date_join1 POSTHOOK: type: DROPTABLE -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table date_join1 ( +PREHOOK: query: create table date_join1 ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, FL_DATE date, @@ -14,9 +12,7 @@ create table date_join1 ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_join1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table date_join1 ( +POSTHOOK: query: create table date_join1 ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, FL_DATE date, @@ -34,16 +30,14 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_join.txt' OVER POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@date_join1 -PREHOOK: query: -- Note that there are 2 rows with date 2000-11-28, so we should expect 4 rows with that date in the join results -select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date +PREHOOK: query: select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date from date_join1 t1 join date_join1 t2 on (t1.fl_date = t2.fl_date) PREHOOK: type: QUERY PREHOOK: Input: default@date_join1 #### A masked pattern was here #### -POSTHOOK: query: -- Note that there are 2 rows with date 2000-11-28, so we should expect 4 rows with that date in the join results -select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date +POSTHOOK: query: select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date from date_join1 t1 join date_join1 t2 on (t1.fl_date = t2.fl_date) diff --git a/ql/src/test/results/clientpositive/date_serde.q.out b/ql/src/test/results/clientpositive/date_serde.q.out index ff09f70..f3985b0 100644 --- a/ql/src/test/results/clientpositive/date_serde.q.out +++ b/ql/src/test/results/clientpositive/date_serde.q.out @@ -22,10 +22,7 @@ PREHOOK: query: drop table date_serde_orc PREHOOK: type: DROPTABLE POSTHOOK: query: drop table 
date_serde_orc POSTHOOK: type: DROPTABLE -PREHOOK: query: -- --- RegexSerDe --- -create table date_serde_regex ( +PREHOOK: query: create table date_serde_regex ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, FL_DATE date, @@ -40,10 +37,7 @@ stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_regex -POSTHOOK: query: -- --- RegexSerDe --- -create table date_serde_regex ( +POSTHOOK: query: create table date_serde_regex ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, FL_DATE date, @@ -231,20 +225,14 @@ POSTHOOK: Input: default@date_serde_regex 2010-10-29 12 2010-10-30 11 2010-10-31 8 -PREHOOK: query: -- --- LazyBinary --- -create table date_serde_lb ( +PREHOOK: query: create table date_serde_lb ( c1 date, c2 int ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_lb -POSTHOOK: query: -- --- LazyBinary --- -create table date_serde_lb ( +POSTHOOK: query: create table date_serde_lb ( c1 date, c2 int ) @@ -289,20 +277,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_serde_lb #### A masked pattern was here #### 2010-10-20 1064 -PREHOOK: query: -- --- LazySimple --- -create table date_serde_ls ( +PREHOOK: query: create table date_serde_ls ( c1 date, c2 int ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_ls -POSTHOOK: query: -- --- LazySimple --- -create table date_serde_ls ( +POSTHOOK: query: create table date_serde_ls ( c1 date, c2 int ) @@ -347,20 +329,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_serde_ls #### A masked pattern was here #### 2010-10-20 1064 -PREHOOK: query: -- --- Columnar --- -create table date_serde_c ( +PREHOOK: query: create table date_serde_c ( c1 date, c2 int ) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_c -POSTHOOK: query: -- --- Columnar --- -create table date_serde_c ( +POSTHOOK: query: create 
table date_serde_c ( c1 date, c2 int ) stored as rcfile @@ -405,20 +381,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_serde_c #### A masked pattern was here #### 2010-10-20 1064 -PREHOOK: query: -- --- LazyBinaryColumnar --- -create table date_serde_lbc ( +PREHOOK: query: create table date_serde_lbc ( c1 date, c2 int ) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_lbc -POSTHOOK: query: -- --- LazyBinaryColumnar --- -create table date_serde_lbc ( +POSTHOOK: query: create table date_serde_lbc ( c1 date, c2 int ) stored as rcfile @@ -463,20 +433,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_serde_lbc #### A masked pattern was here #### 2010-10-20 1064 -PREHOOK: query: -- --- ORC --- -create table date_serde_orc ( +PREHOOK: query: create table date_serde_orc ( c1 date, c2 int ) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_orc -POSTHOOK: query: -- --- ORC --- -create table date_serde_orc ( +POSTHOOK: query: create table date_serde_orc ( c1 date, c2 int ) stored as orc diff --git a/ql/src/test/results/clientpositive/date_udf.q.out b/ql/src/test/results/clientpositive/date_udf.q.out index 2037367..37ad29e 100644 --- a/ql/src/test/results/clientpositive/date_udf.q.out +++ b/ql/src/test/results/clientpositive/date_udf.q.out @@ -74,15 +74,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OV POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@date_udf_flight -PREHOOK: query: -- Test UDFs with date input -select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), +PREHOOK: query: select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), weekofyear(d), to_date(d) from date_udf PREHOOK: type: QUERY PREHOOK: Input: default@date_udf #### A masked pattern was here #### -POSTHOOK: query: -- Test UDFs with date input -select unix_timestamp(d), 
year(d), month(d), day(d), dayofmonth(d), +POSTHOOK: query: select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), weekofyear(d), to_date(d) from date_udf POSTHOOK: type: QUERY @@ -115,15 +113,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf #### A masked pattern was here #### 0 3333 -3333 -3333 3333 -PREHOOK: query: -- Test UDFs with string input -select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), +PREHOOK: query: select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), weekofyear(d), to_date(d) from date_udf_string PREHOOK: type: QUERY PREHOOK: Input: default@date_udf_string #### A masked pattern was here #### -POSTHOOK: query: -- Test UDFs with string input -select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), +POSTHOOK: query: select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), weekofyear(d), to_date(d) from date_udf_string POSTHOOK: type: QUERY @@ -195,8 +191,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf #### A masked pattern was here #### 1970-01-01 08:00:00 1969-12-31 16:00:00 2013-06-19 07:00:00 2013-06-18 17:00:00 -PREHOOK: query: -- should all be true -select +PREHOOK: query: select to_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = to_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'), from_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = from_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'), to_utc_timestamp(date '2013-06-19', 'America/Los_Angeles') = to_utc_timestamp(timestamp('2013-06-19 00:00:00'), 'America/Los_Angeles'), @@ -205,8 +200,7 @@ select PREHOOK: type: QUERY PREHOOK: Input: default@date_udf #### A masked pattern was here #### -POSTHOOK: query: -- should all be true -select +POSTHOOK: query: select to_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = to_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'), from_utc_timestamp(date '1970-01-01', 
'America/Los_Angeles') = from_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'), to_utc_timestamp(date '2013-06-19', 'America/Los_Angeles') = to_utc_timestamp(timestamp('2013-06-19 00:00:00'), 'America/Los_Angeles'), @@ -216,13 +210,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf #### A masked pattern was here #### true true true true -PREHOOK: query: -- Aggregation functions (min/max) -select min(fl_date) from date_udf_flight +PREHOOK: query: select min(fl_date) from date_udf_flight PREHOOK: type: QUERY PREHOOK: Input: default@date_udf_flight #### A masked pattern was here #### -POSTHOOK: query: -- Aggregation functions (min/max) -select min(fl_date) from date_udf_flight +POSTHOOK: query: select min(fl_date) from date_udf_flight POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf_flight #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/decimal_join.q.out b/ql/src/test/results/clientpositive/decimal_join.q.out index cc669a6..55bd03f 100644 --- a/ql/src/test/results/clientpositive/decimal_join.q.out +++ b/ql/src/test/results/clientpositive/decimal_join.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- HIVE-5292 Join on decimal columns fails --- SORT_QUERY_RESULTS - -create table src_dec (key decimal(3,0), value string) +PREHOOK: query: create table src_dec (key decimal(3,0), value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_dec -POSTHOOK: query: -- HIVE-5292 Join on decimal columns fails --- SORT_QUERY_RESULTS - -create table src_dec (key decimal(3,0), value string) +POSTHOOK: query: create table src_dec (key decimal(3,0), value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_dec diff --git a/ql/src/test/results/clientpositive/decimal_precision.q.out b/ql/src/test/results/clientpositive/decimal_precision.q.out index 9fbc8f5..211de31 100644 --- 
a/ql/src/test/results/clientpositive/decimal_precision.q.out +++ b/ql/src/test/results/clientpositive/decimal_precision.q.out @@ -631,13 +631,11 @@ POSTHOOK: query: DROP TABLE DECIMAL_PRECISION POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@decimal_precision POSTHOOK: Output: default@decimal_precision -PREHOOK: query: -- Expect overflow and return null as the value -CREATE TABLE DECIMAL_PRECISION(dec decimal(38,18)) +PREHOOK: query: CREATE TABLE DECIMAL_PRECISION(dec decimal(38,18)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DECIMAL_PRECISION -POSTHOOK: query: -- Expect overflow and return null as the value -CREATE TABLE DECIMAL_PRECISION(dec decimal(38,18)) +POSTHOOK: query: CREATE TABLE DECIMAL_PRECISION(dec decimal(38,18)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DECIMAL_PRECISION diff --git a/ql/src/test/results/clientpositive/decimal_udf.q.out b/ql/src/test/results/clientpositive/decimal_udf.q.out index c0baab8..70e9dbf 100644 --- a/ql/src/test/results/clientpositive/decimal_udf.q.out +++ b/ql/src/test/results/clientpositive/decimal_udf.q.out @@ -24,11 +24,9 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv7.txt' INTO TABLE DE POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@decimal_udf -PREHOOK: query: -- addition -EXPLAIN SELECT key + key FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT key + key FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- addition -EXPLAIN SELECT key + key FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT key + key FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -294,11 +292,9 @@ NULL 2.0 -1.2345678891234567E9 1.2345678911234567E9 -PREHOOK: query: -- substraction -EXPLAIN SELECT key - key FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT key - key FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- substraction -EXPLAIN SELECT key - key FROM 
DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT key - key FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -564,11 +560,9 @@ NULL 0.0 -1.2345678911234567E9 1.2345678891234567E9 -PREHOOK: query: -- multiplication -EXPLAIN SELECT key * key FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT key * key FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- multiplication -EXPLAIN SELECT key * key FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT key * key FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -889,11 +883,9 @@ NULL 2.0 -2.4691357802469134E9 2.4691357802469134E9 -PREHOOK: query: -- division -EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1 +PREHOOK: query: EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1 PREHOOK: type: QUERY -POSTHOOK: query: -- division -EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1 +POSTHOOK: query: EXPLAIN SELECT key / 0 FROM DECIMAL_UDF limit 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1202,11 +1194,9 @@ NULL 1.5 -6.172839440617284E8 6.172839460617284E8 -PREHOOK: query: -- abs -EXPLAIN SELECT abs(key) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT abs(key) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- abs -EXPLAIN SELECT abs(key) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT abs(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1271,11 +1261,9 @@ NULL 1.0000000000 1234567890.1234567890 1234567890.1234567800 -PREHOOK: query: -- avg -EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +PREHOOK: query: EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value PREHOOK: type: QUERY -POSTHOOK: query: -- avg -EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value +POSTHOOK: query: EXPLAIN SELECT value, sum(key) / 
count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1376,11 +1364,9 @@ POSTHOOK: Input: default@decimal_udf 200 200.00000000000000000000000 200.00000000000000 200.0000000000 4400 -4400.00000000000000000000000 -4400.00000000000000 -4400.0000000000 1234567890 1234567890.12345678000000000000000 1234567890.12345678000000 1234567890.1234567800 -PREHOOK: query: -- negative -EXPLAIN SELECT -key FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT -key FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- negative -EXPLAIN SELECT -key FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT -key FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1445,11 +1431,9 @@ NULL -1.0000000000 1234567890.1234567890 -1234567890.1234567800 -PREHOOK: query: -- positive -EXPLAIN SELECT +key FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT +key FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- positive -EXPLAIN SELECT +key FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT +key FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1514,11 +1498,9 @@ NULL 1.0000000000 -1234567890.1234567890 1234567890.1234567800 -PREHOOK: query: -- ceiling -EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF +PREHOOK: query: EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- ceiling -EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF +POSTHOOK: query: EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1583,11 +1565,9 @@ NULL 1 -1234567890 1234567891 -PREHOOK: query: -- floor -EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- floor -EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT FLOOR(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: 
Stage-0 is a root stage @@ -1652,11 +1632,9 @@ NULL 1 -1234567891 1234567890 -PREHOOK: query: -- round -EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- round -EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT ROUND(key, 2) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1721,11 +1699,9 @@ NULL 1.00 -1234567890.12 1234567890.12 -PREHOOK: query: -- power -EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- power -EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT POWER(key, 2) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1790,11 +1766,9 @@ NULL 1.0 1.52415787532388352E18 1.52415787532388352E18 -PREHOOK: query: -- modulo -EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- modulo -EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT (key + 1) % (key / 2) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1859,11 +1833,9 @@ NULL 0.000000000000 -617283944.061728394500 1.000000000000 -PREHOOK: query: -- stddev, var -EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: query: EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value PREHOOK: type: QUERY -POSTHOOK: query: -- stddev, var -EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: query: EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1938,11 +1910,9 @@ POSTHOOK: Input: default@decimal_udf 200 0.0 
0.0 4400 0.0 0.0 1234567890 0.0 0.0 -PREHOOK: query: -- stddev_samp, var_samp -EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +PREHOOK: query: EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value PREHOOK: type: QUERY -POSTHOOK: query: -- stddev_samp, var_samp -EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value +POSTHOOK: query: EXPLAIN SELECT value, stddev_samp(key), var_samp(key) FROM DECIMAL_UDF GROUP BY value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2017,11 +1987,9 @@ POSTHOOK: Input: default@decimal_udf 200 0.0 0.0 4400 0.0 0.0 1234567890 0.0 0.0 -PREHOOK: query: -- histogram -EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- histogram -EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT histogram_numeric(key, 3) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2076,11 +2044,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### [{"x":-1.2345678901234567E9,"y":1.0},{"x":-144.50057142857142,"y":35.0},{"x":1.2345678901234567E9,"y":1.0}] -PREHOOK: query: -- min -EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- min -EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT MIN(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2135,11 +2101,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -1234567890.1234567890 -PREHOOK: query: -- max -EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- max -EXPLAIN SELECT 
MAX(key) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2194,11 +2158,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 1234567890.1234567800 -PREHOOK: query: -- count -EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF +PREHOOK: query: EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF PREHOOK: type: QUERY -POSTHOOK: query: -- count -EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF +POSTHOOK: query: EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/describe_comment_indent.q.out b/ql/src/test/results/clientpositive/describe_comment_indent.q.out index 5b41fb8..7b109b3 100644 --- a/ql/src/test/results/clientpositive/describe_comment_indent.q.out +++ b/ql/src/test/results/clientpositive/describe_comment_indent.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- test comment indent processing for multi-line comments - -CREATE TABLE test_table( +PREHOOK: query: CREATE TABLE test_table( col1 INT COMMENT 'col1 one line comment', col2 STRING COMMENT 'col2 two lines comment', @@ -12,9 +10,7 @@ two lines' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- test comment indent processing for multi-line comments - -CREATE TABLE test_table( +POSTHOOK: query: CREATE TABLE test_table( col1 INT COMMENT 'col1 one line comment', col2 STRING COMMENT 'col2 two lines comment', diff --git a/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out b/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out index 703fa14..de1cca9 100644 --- a/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out +++ b/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- HIVE-2905 showing non-ascii comments - -create table dummy (col1 
string, col2 string, col3 string) +PREHOOK: query: create table dummy (col1 string, col2 string, col3 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dummy -POSTHOOK: query: -- HIVE-2905 showing non-ascii comments - -create table dummy (col1 string, col2 string, col3 string) +POSTHOOK: query: create table dummy (col1 string, col2 string, col3 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dummy diff --git a/ql/src/test/results/clientpositive/describe_pretty.q.out b/ql/src/test/results/clientpositive/describe_pretty.q.out index 279f567..aa90a67 100644 --- a/ql/src/test/results/clientpositive/describe_pretty.q.out +++ b/ql/src/test/results/clientpositive/describe_pretty.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- test comment indent processing for multi-line comments - -CREATE TABLE test_table( +PREHOOK: query: CREATE TABLE test_table( col1 INT COMMENT 'col1 one line comment', col2 STRING COMMENT 'col2 two lines comment', @@ -18,9 +16,7 @@ two lines' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- test comment indent processing for multi-line comments - -CREATE TABLE test_table( +POSTHOOK: query: CREATE TABLE test_table( col1 INT COMMENT 'col1 one line comment', col2 STRING COMMENT 'col2 two lines comment', @@ -38,38 +34,10 @@ two lines' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table -PREHOOK: query: -- There will be an extra tab at the end of each comment line in the output. --- This is because DESCRIBE command separates the column, type and --- comment field using a \t. DESCRIBE PRETTY uses spaces instead --- of \t to separate columns. Hive gets confused when it parses the string --- table description constructed in MetaDataPrettyFormatUtils, and adds a tab --- at the end of each line. --- There are three ways to address this: --- 1. 
Pad each row to the full terminal width with extra spaces. --- 2. Assume a maximum tab width of 8, and subtract 2 * 8 spaces from the --- available line width. This approach wastes upto 2 * 8 - 2 columns. --- 3. Since the pretty output is meant only for human consumption, do nothing. --- Just add a comment to the unit test file explaining what is happening. --- This is the approach chosen. - -DESCRIBE PRETTY test_table +PREHOOK: query: DESCRIBE PRETTY test_table PREHOOK: type: DESCTABLE PREHOOK: Input: default@test_table -POSTHOOK: query: -- There will be an extra tab at the end of each comment line in the output. --- This is because DESCRIBE command separates the column, type and --- comment field using a \t. DESCRIBE PRETTY uses spaces instead --- of \t to separate columns. Hive gets confused when it parses the string --- table description constructed in MetaDataPrettyFormatUtils, and adds a tab --- at the end of each line. --- There are three ways to address this: --- 1. Pad each row to the full terminal width with extra spaces. --- 2. Assume a maximum tab width of 8, and subtract 2 * 8 spaces from the --- available line width. This approach wastes upto 2 * 8 - 2 columns. --- 3. Since the pretty output is meant only for human consumption, do nothing. --- Just add a comment to the unit test file explaining what is happening. --- This is the approach chosen. 
- -DESCRIBE PRETTY test_table +POSTHOOK: query: DESCRIBE PRETTY test_table POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@test_table col_name data_type comment diff --git a/ql/src/test/results/clientpositive/describe_syntax.q.out b/ql/src/test/results/clientpositive/describe_syntax.q.out index 7d012ca..34f0816 100644 --- a/ql/src/test/results/clientpositive/describe_syntax.q.out +++ b/ql/src/test/results/clientpositive/describe_syntax.q.out @@ -39,12 +39,10 @@ POSTHOOK: query: ALTER TABLE t1 ADD PARTITION (ds='4', part='5') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: db1@t1 POSTHOOK: Output: db1@t1@ds=4/part=5 -PREHOOK: query: -- describe table -DESCRIBE t1 +PREHOOK: query: DESCRIBE t1 PREHOOK: type: DESCTABLE PREHOOK: Input: db1@t1 -POSTHOOK: query: -- describe table -DESCRIBE t1 +POSTHOOK: query: DESCRIBE t1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: db1@t1 key1 int @@ -111,12 +109,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- describe database.table -DESCRIBE db1.t1 +PREHOOK: query: DESCRIBE db1.t1 PREHOOK: type: DESCTABLE PREHOOK: Input: db1@t1 -POSTHOOK: query: -- describe database.table -DESCRIBE db1.t1 +POSTHOOK: query: DESCRIBE db1.t1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: db1@t1 key1 int @@ -183,12 +179,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- describe table column -DESCRIBE t1 key1 +PREHOOK: query: DESCRIBE t1 key1 PREHOOK: type: DESCTABLE PREHOOK: Input: db1@t1 -POSTHOOK: query: -- describe table column -DESCRIBE t1 key1 +POSTHOOK: query: DESCRIBE t1 key1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: db1@t1 key1 int from deserializer @@ -208,12 +202,10 @@ POSTHOOK: Input: db1@t1 # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment key1 int from deserializer -PREHOOK: query: -- describe database.tabe column -DESCRIBE db1.t1 key1 +PREHOOK: query: DESCRIBE db1.t1 
key1 PREHOOK: type: DESCTABLE PREHOOK: Input: db1@t1 -POSTHOOK: query: -- describe database.tabe column -DESCRIBE db1.t1 key1 +POSTHOOK: query: DESCRIBE db1.t1 key1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: db1@t1 key1 int from deserializer @@ -233,12 +225,10 @@ POSTHOOK: Input: db1@t1 # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment key1 int from deserializer -PREHOOK: query: -- describe table column -DESCRIBE t1 key1 +PREHOOK: query: DESCRIBE t1 key1 PREHOOK: type: DESCTABLE PREHOOK: Input: db1@t1 -POSTHOOK: query: -- describe table column -DESCRIBE t1 key1 +POSTHOOK: query: DESCRIBE t1 key1 POSTHOOK: type: DESCTABLE POSTHOOK: Input: db1@t1 key1 int from deserializer @@ -258,12 +248,10 @@ POSTHOOK: Input: db1@t1 # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment key1 int from deserializer -PREHOOK: query: -- describe table partition -DESCRIBE t1 PARTITION(ds='4', part='5') +PREHOOK: query: DESCRIBE t1 PARTITION(ds='4', part='5') PREHOOK: type: DESCTABLE PREHOOK: Input: db1@t1 -POSTHOOK: query: -- describe table partition -DESCRIBE t1 PARTITION(ds='4', part='5') +POSTHOOK: query: DESCRIBE t1 PARTITION(ds='4', part='5') POSTHOOK: type: DESCTABLE POSTHOOK: Input: db1@t1 key1 int @@ -334,12 +322,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- describe database.table partition -DESCRIBE db1.t1 PARTITION(ds='4', part='5') +PREHOOK: query: DESCRIBE db1.t1 PARTITION(ds='4', part='5') PREHOOK: type: DESCTABLE PREHOOK: Input: db1@t1 -POSTHOOK: query: -- describe database.table partition -DESCRIBE db1.t1 PARTITION(ds='4', part='5') +POSTHOOK: query: DESCRIBE db1.t1 PARTITION(ds='4', part='5') POSTHOOK: type: DESCTABLE POSTHOOK: Input: db1@t1 key1 int diff --git a/ql/src/test/results/clientpositive/describe_xpath.q.out b/ql/src/test/results/clientpositive/describe_xpath.q.out index 58ff765..4d40fc2 100644 
--- a/ql/src/test/results/clientpositive/describe_xpath.q.out +++ b/ql/src/test/results/clientpositive/describe_xpath.q.out @@ -1,56 +1,44 @@ -PREHOOK: query: -- Describe a list structure in a thrift table -describe src_thrift lint +PREHOOK: query: describe src_thrift lint PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -POSTHOOK: query: -- Describe a list structure in a thrift table -describe src_thrift lint +POSTHOOK: query: describe src_thrift lint POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src_thrift lint array from deserializer -PREHOOK: query: -- Describe the element of a list -describe src_thrift lint.$elem$ +PREHOOK: query: describe src_thrift lint.$elem$ PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -POSTHOOK: query: -- Describe the element of a list -describe src_thrift lint.$elem$ +POSTHOOK: query: describe src_thrift lint.$elem$ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src_thrift $elem$ int from deserializer -PREHOOK: query: -- Describe the key of a map -describe src_thrift mStringString.$key$ +PREHOOK: query: describe src_thrift mStringString.$key$ PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -POSTHOOK: query: -- Describe the key of a map -describe src_thrift mStringString.$key$ +POSTHOOK: query: describe src_thrift mStringString.$key$ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src_thrift $key$ string from deserializer -PREHOOK: query: -- Describe the value of a map -describe src_thrift mStringString.$value$ +PREHOOK: query: describe src_thrift mStringString.$value$ PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -POSTHOOK: query: -- Describe the value of a map -describe src_thrift mStringString.$value$ +POSTHOOK: query: describe src_thrift mStringString.$value$ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src_thrift $value$ string from deserializer -PREHOOK: query: -- Describe a complex element of a list -describe src_thrift lintString.$elem$ +PREHOOK: query: describe 
src_thrift lintString.$elem$ PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -POSTHOOK: query: -- Describe a complex element of a list -describe src_thrift lintString.$elem$ +POSTHOOK: query: describe src_thrift lintString.$elem$ POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src_thrift myint int from deserializer mystring string from deserializer underscore_int int from deserializer -PREHOOK: query: -- Describe a member of an element of a list -describe src_thrift lintString.$elem$.myint +PREHOOK: query: describe src_thrift lintString.$elem$.myint PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -POSTHOOK: query: -- Describe a member of an element of a list -describe src_thrift lintString.$elem$.myint +POSTHOOK: query: describe src_thrift lintString.$elem$.myint POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src_thrift myint int from deserializer diff --git a/ql/src/test/results/clientpositive/diff_part_input_formats.q.out b/ql/src/test/results/clientpositive/diff_part_input_formats.q.out index 2b46917..b19e9cc 100644 --- a/ql/src/test/results/clientpositive/diff_part_input_formats.q.out +++ b/ql/src/test/results/clientpositive/diff_part_input_formats.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Tests the case where a table is changed from sequence file to a RC file, --- resulting in partitions in both file formats. If no valid partitions are --- selected, then it should still use RC file for reading the dummy partition. -CREATE TABLE part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE +PREHOOK: query: CREATE TABLE part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_test -POSTHOOK: query: -- Tests the case where a table is changed from sequence file to a RC file, --- resulting in partitions in both file formats. 
If no valid partitions are --- selected, then it should still use RC file for reading the dummy partition. -CREATE TABLE part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE +POSTHOOK: query: CREATE TABLE part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) STORED AS SEQUENCEFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@part_test diff --git a/ql/src/test/results/clientpositive/disallow_incompatible_type_change_off.q.out b/ql/src/test/results/clientpositive/disallow_incompatible_type_change_off.q.out index ad34732..e041d05 100644 --- a/ql/src/test/results/clientpositive/disallow_incompatible_type_change_off.q.out +++ b/ql/src/test/results/clientpositive/disallow_incompatible_type_change_off.q.out @@ -36,13 +36,11 @@ POSTHOOK: Input: default@test_table123 POSTHOOK: Input: default@test_table123@ds=foo1 #### A masked pattern was here #### 1 {"a1":"b1"} foo1 -PREHOOK: query: -- This should now work as hive.metastore.disallow.incompatible.col.type.changes is false -ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING) +PREHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@test_table123 PREHOOK: Output: default@test_table123 -POSTHOOK: query: -- This should now work as hive.metastore.disallow.incompatible.col.type.changes is false -ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING) +POSTHOOK: query: ALTER TABLE test_table123 REPLACE COLUMNS (a INT, b STRING) POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@test_table123 POSTHOOK: Output: default@test_table123 diff --git a/ql/src/test/results/clientpositive/driverhook.q.out b/ql/src/test/results/clientpositive/driverhook.q.out index cf22a22..66793c3 100644 --- a/ql/src/test/results/clientpositive/driverhook.q.out +++ b/ql/src/test/results/clientpositive/driverhook.q.out @@ -1,20 +1,14 @@ --- This query should appear in 
the Hive CLI output. --- We test DriverTestHook, which does exactly that. --- This should not break. -SELECT * FROM src LIMIT 1 -PREHOOK: query: -- This query should appear in the Hive CLI output. --- We test DriverTestHook, which does exactly that. --- This should not break. + + + SELECT * FROM src LIMIT 1 +PREHOOK: query: SELECT * FROM src LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- This query should appear in the Hive CLI output. --- We test DriverTestHook, which does exactly that. --- This should not break. -SELECT * FROM src LIMIT 1 +POSTHOOK: query: SELECT * FROM src LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/drop_database_removes_partition_dirs.q.out b/ql/src/test/results/clientpositive/drop_database_removes_partition_dirs.q.out index 499c996..cb578e1 100644 --- a/ql/src/test/results/clientpositive/drop_database_removes_partition_dirs.q.out +++ b/ql/src/test/results/clientpositive/drop_database_removes_partition_dirs.q.out @@ -1,13 +1,7 @@ -PREHOOK: query: -- This test verifies that if a partition exists outside a table's current location when the --- database is dropped the partition's location is dropped as well. - -CREATE DATABASE test_database +PREHOOK: query: CREATE DATABASE test_database PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_database -POSTHOOK: query: -- This test verifies that if a partition exists outside a table's current location when the --- database is dropped the partition's location is dropped as well. 
- -CREATE DATABASE test_database +POSTHOOK: query: CREATE DATABASE test_database POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_database PREHOOK: query: USE test_database diff --git a/ql/src/test/results/clientpositive/drop_index_removes_partition_dirs.q.out b/ql/src/test/results/clientpositive/drop_index_removes_partition_dirs.q.out index 5b8bffa..bbd86b4 100644 --- a/ql/src/test/results/clientpositive/drop_index_removes_partition_dirs.q.out +++ b/ql/src/test/results/clientpositive/drop_index_removes_partition_dirs.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- This test verifies that if a partition exists outside an index table's current location when the --- index is dropped the partition's location is dropped as well. - -CREATE TABLE test_table (key STRING, value STRING) +PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### @@ -9,10 +6,7 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- This test verifies that if a partition exists outside an index table's current location when the --- index is dropped the partition's location is dropped as well. 
- -CREATE TABLE test_table (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/drop_table_removes_partition_dirs.q.out b/ql/src/test/results/clientpositive/drop_table_removes_partition_dirs.q.out index effd78f..2c34dd0 100644 --- a/ql/src/test/results/clientpositive/drop_table_removes_partition_dirs.q.out +++ b/ql/src/test/results/clientpositive/drop_table_removes_partition_dirs.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- This test verifies that if a partition exists outside the table's current location when the --- table is dropped the partition's location is dropped as well. - -CREATE TABLE test_table (key STRING, value STRING) +PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### @@ -9,10 +6,7 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- This test verifies that if a partition exists outside the table's current location when the --- table is dropped the partition's location is dropped as well. 
- -CREATE TABLE test_table (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/drop_table_with_stats.q.out b/ql/src/test/results/clientpositive/drop_table_with_stats.q.out index fbc3ab9..52aa10a 100644 --- a/ql/src/test/results/clientpositive/drop_table_with_stats.q.out +++ b/ql/src/test/results/clientpositive/drop_table_with_stats.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- This test verifies that a table could be dropped with columns stats computed --- The column stats for table without partition will go to TAB_COL_STATS -CREATE DATABASE IF NOT EXISTS tblstatsdb1 +PREHOOK: query: CREATE DATABASE IF NOT EXISTS tblstatsdb1 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:tblstatsdb1 -POSTHOOK: query: -- This test verifies that a table could be dropped with columns stats computed --- The column stats for table without partition will go to TAB_COL_STATS -CREATE DATABASE IF NOT EXISTS tblstatsdb1 +POSTHOOK: query: CREATE DATABASE IF NOT EXISTS tblstatsdb1 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:tblstatsdb1 PREHOOK: query: USE tblstatsdb1 diff --git a/ql/src/test/results/clientpositive/druid_basic1.q.out b/ql/src/test/results/clientpositive/druid_basic1.q.out index 74ae9ed..c16f81e 100644 --- a/ql/src/test/results/clientpositive/druid_basic1.q.out +++ b/ql/src/test/results/clientpositive/druid_basic1.q.out @@ -60,15 +60,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- different table, same datasource -CREATE EXTERNAL TABLE druid_table_2 +PREHOOK: query: CREATE EXTERNAL TABLE druid_table_2 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@druid_table_2 -POSTHOOK: query: 
-- different table, same datasource -CREATE EXTERNAL TABLE druid_table_2 +POSTHOOK: query: CREATE EXTERNAL TABLE druid_table_2 STORED BY 'org.apache.hadoop.hive.druid.QTestDruidStorageHandler' TBLPROPERTIES ("druid.datasource" = "wikipedia") POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/druid_basic2.q.out b/ql/src/test/results/clientpositive/druid_basic2.q.out index 48de99a..88f2ad6 100644 --- a/ql/src/test/results/clientpositive/druid_basic2.q.out +++ b/ql/src/test/results/clientpositive/druid_basic2.q.out @@ -60,12 +60,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- dimension -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT robot FROM druid_table_1 PREHOOK: type: QUERY -POSTHOOK: query: -- dimension -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT robot FROM druid_table_1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -89,12 +87,10 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- metric -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT delta FROM druid_table_1 PREHOOK: type: QUERY -POSTHOOK: query: -- metric -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT delta FROM druid_table_1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -180,8 +176,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- TODO: currently nothing is pushed - ISNOTNULL -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT a.robot, b.language FROM ( @@ -193,8 +188,7 @@ FROM ON a.language = b.language ) PREHOOK: type: QUERY -POSTHOOK: query: -- TODO: currently nothing is pushed - ISNOTNULL -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT a.robot, b.language FROM ( diff --git a/ql/src/test/results/clientpositive/druid_intervals.q.out b/ql/src/test/results/clientpositive/druid_intervals.q.out index b9a8313..ca3febf 
100644 --- a/ql/src/test/results/clientpositive/druid_intervals.q.out +++ b/ql/src/test/results/clientpositive/druid_intervals.q.out @@ -60,13 +60,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- (-∞‥+∞) -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 PREHOOK: type: QUERY -POSTHOOK: query: -- (-∞‥+∞) -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 POSTHOOK: type: QUERY @@ -90,14 +88,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- (-∞‥2012-03-01 00:00:00) -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` < '2012-03-01 00:00:00' PREHOOK: type: QUERY -POSTHOOK: query: -- (-∞‥2012-03-01 00:00:00) -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` < '2012-03-01 00:00:00' @@ -122,14 +118,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- [2010-01-01 00:00:00‥2012-03-01 00:00:00) -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' PREHOOK: type: QUERY -POSTHOOK: query: -- [2010-01-01 00:00:00‥2012-03-01 00:00:00) -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' @@ -154,15 +148,13 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00) -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' AND `__time` < '2011-01-01 00:00:00' PREHOOK: type: QUERY -POSTHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00) -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time` FROM 
druid_table_1 WHERE `__time` >= '2010-01-01 00:00:00' AND `__time` <= '2012-03-01 00:00:00' @@ -188,14 +180,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00] -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00' PREHOOK: type: QUERY -POSTHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00] -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00' @@ -220,15 +210,13 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00],[2012-01-01 00:00:00‥2013-01-01 00:00:00] -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') OR (`__time` BETWEEN '2012-01-01 00:00:00' AND '2013-01-01 00:00:00') PREHOOK: type: QUERY -POSTHOOK: query: -- [2010-01-01 00:00:00‥2011-01-01 00:00:00],[2012-01-01 00:00:00‥2013-01-01 00:00:00] -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') @@ -254,15 +242,13 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- OVERLAP [2010-01-01 00:00:00‥2012-01-01 00:00:00] -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') OR (`__time` BETWEEN '2010-06-01 00:00:00' AND '2012-01-01 00:00:00') PREHOOK: type: QUERY -POSTHOOK: query: -- OVERLAP [2010-01-01 00:00:00‥2012-01-01 00:00:00] -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE (`__time` BETWEEN '2010-01-01 00:00:00' AND '2011-01-01 00:00:00') @@ -288,14 
+274,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- IN: MULTIPLE INTERVALS [2010-01-01 00:00:00‥2010-01-01 00:00:00),[2011-01-01 00:00:00‥2011-01-01 00:00:00) -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') PREHOOK: type: QUERY -POSTHOOK: query: -- IN: MULTIPLE INTERVALS [2010-01-01 00:00:00‥2010-01-01 00:00:00),[2011-01-01 00:00:00‥2011-01-01 00:00:00) -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time` FROM druid_table_1 WHERE `__time` IN ('2010-01-01 00:00:00','2011-01-01 00:00:00') diff --git a/ql/src/test/results/clientpositive/druid_timeseries.q.out b/ql/src/test/results/clientpositive/druid_timeseries.q.out index 1ed5573..6b2ffe9 100644 --- a/ql/src/test/results/clientpositive/druid_timeseries.q.out +++ b/ql/src/test/results/clientpositive/druid_timeseries.q.out @@ -60,13 +60,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- GRANULARITY: ALL -EXPLAIN +PREHOOK: query: EXPLAIN SELECT max(added), sum(variation) FROM druid_table_1 PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: ALL -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT max(added), sum(variation) FROM druid_table_1 POSTHOOK: type: QUERY @@ -90,14 +88,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: NONE -EXPLAIN +PREHOOK: query: EXPLAIN SELECT `__time`, max(added), sum(variation) FROM druid_table_1 GROUP BY `__time` PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: NONE -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT `__time`, max(added), sum(variation) FROM druid_table_1 GROUP BY `__time` @@ -122,14 +118,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: YEAR -EXPLAIN +PREHOOK: query: EXPLAIN SELECT 
floor_year(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_year(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: YEAR -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_year(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_year(`__time`) @@ -154,14 +148,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: QUARTER -EXPLAIN +PREHOOK: query: EXPLAIN SELECT floor_quarter(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_quarter(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: QUARTER -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_quarter(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_quarter(`__time`) @@ -186,14 +178,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: MONTH -EXPLAIN +PREHOOK: query: EXPLAIN SELECT floor_month(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_month(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: MONTH -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_month(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_month(`__time`) @@ -218,14 +208,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: WEEK -EXPLAIN +PREHOOK: query: EXPLAIN SELECT floor_week(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_week(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: WEEK -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_week(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_week(`__time`) @@ -250,14 +238,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: DAY -EXPLAIN +PREHOOK: query: 
EXPLAIN SELECT floor_day(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_day(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: DAY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_day(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_day(`__time`) @@ -282,14 +268,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: HOUR -EXPLAIN +PREHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_hour(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: HOUR -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_hour(`__time`) @@ -314,14 +298,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: MINUTE -EXPLAIN +PREHOOK: query: EXPLAIN SELECT floor_minute(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_minute(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: MINUTE -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_minute(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_minute(`__time`) @@ -346,14 +328,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: SECOND -EXPLAIN +PREHOOK: query: EXPLAIN SELECT floor_second(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_second(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: SECOND -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_second(`__time`), max(added), sum(variation) FROM druid_table_1 GROUP BY floor_second(`__time`) @@ -378,15 +358,13 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- WITH FILTER ON DIMENSION 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) FROM druid_table_1 WHERE robot='1' GROUP BY floor_hour(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- WITH FILTER ON DIMENSION -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) FROM druid_table_1 WHERE robot='1' @@ -412,8 +390,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- WITH FILTER ON TIME -EXPLAIN +PREHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) FROM druid_table_1 WHERE floor_hour(`__time`) @@ -421,8 +398,7 @@ WHERE floor_hour(`__time`) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP) GROUP BY floor_hour(`__time`) PREHOOK: type: QUERY -POSTHOOK: query: -- WITH FILTER ON TIME -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT floor_hour(`__time`), max(added), sum(variation) FROM druid_table_1 WHERE floor_hour(`__time`) @@ -485,8 +461,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- WITH FILTER ON TIME -EXPLAIN +PREHOOK: query: EXPLAIN SELECT subq.h, subq.m, subq.s FROM ( @@ -497,8 +472,7 @@ FROM WHERE subq.h BETWEEN CAST('2010-01-01 00:00:00' AS TIMESTAMP) AND CAST('2014-01-01 00:00:00' AS TIMESTAMP) PREHOOK: type: QUERY -POSTHOOK: query: -- WITH FILTER ON TIME -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT subq.h, subq.m, subq.s FROM ( diff --git a/ql/src/test/results/clientpositive/druid_topn.q.out b/ql/src/test/results/clientpositive/druid_topn.q.out index 9a7ed6c..57d6586 100644 --- a/ql/src/test/results/clientpositive/druid_topn.q.out +++ b/ql/src/test/results/clientpositive/druid_topn.q.out @@ -60,16 +60,14 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- GRANULARITY: ALL -EXPLAIN +PREHOOK: query: EXPLAIN SELECT robot, max(added) as m, sum(variation) FROM druid_table_1 GROUP BY robot ORDER BY m DESC LIMIT 100 PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: ALL 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT robot, max(added) as m, sum(variation) FROM druid_table_1 GROUP BY robot @@ -96,16 +94,14 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: NONE -EXPLAIN +PREHOOK: query: EXPLAIN SELECT robot, `__time`, max(added), sum(variation) as s FROM druid_table_1 GROUP BY robot, `__time` ORDER BY s DESC LIMIT 100 PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: NONE -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT robot, `__time`, max(added), sum(variation) as s FROM druid_table_1 GROUP BY robot, `__time` @@ -132,16 +128,14 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- GRANULARITY: YEAR -EXPLAIN +PREHOOK: query: EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s FROM druid_table_1 GROUP BY robot, floor_year(`__time`) ORDER BY s DESC LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- GRANULARITY: YEAR -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s FROM druid_table_1 GROUP BY robot, floor_year(`__time`) @@ -168,16 +162,14 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- ASC: TRANSFORM INTO GROUP BY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added), sum(variation) as s FROM druid_table_1 GROUP BY robot, floor_month(`__time`) ORDER BY s LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- ASC: TRANSFORM INTO GROUP BY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added), sum(variation) as s FROM druid_table_1 GROUP BY robot, floor_month(`__time`) @@ -204,16 +196,14 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- MULTIPLE ORDER: TRANSFORM INTO GROUP BY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT robot, 
floor_month(`__time`), max(added) as m, sum(variation) as s FROM druid_table_1 GROUP BY robot, namespace, floor_month(`__time`) ORDER BY s DESC, m DESC LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- MULTIPLE ORDER: TRANSFORM INTO GROUP BY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s FROM druid_table_1 GROUP BY robot, namespace, floor_month(`__time`) @@ -240,16 +230,14 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- MULTIPLE ORDER MIXED: TRANSFORM INTO GROUP BY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s FROM druid_table_1 GROUP BY robot, namespace, floor_month(`__time`) ORDER BY robot ASC, m DESC LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- MULTIPLE ORDER MIXED: TRANSFORM INTO GROUP BY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT robot, floor_month(`__time`), max(added) as m, sum(variation) as s FROM druid_table_1 GROUP BY robot, namespace, floor_month(`__time`) @@ -276,8 +264,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- WITH FILTER ON DIMENSION: TRANSFORM INTO GROUP BY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s FROM druid_table_1 WHERE robot='1' @@ -285,8 +272,7 @@ GROUP BY robot, floor_year(`__time`) ORDER BY s LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- WITH FILTER ON DIMENSION: TRANSFORM INTO GROUP BY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT robot, floor_year(`__time`), max(added), sum(variation) as s FROM druid_table_1 WHERE robot='1' @@ -314,8 +300,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE ListSink -PREHOOK: query: -- WITH FILTER ON TIME -EXPLAIN +PREHOOK: query: EXPLAIN SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation) FROM druid_table_1 
WHERE floor_hour(`__time`) @@ -325,8 +310,7 @@ GROUP BY robot, floor_hour(`__time`) ORDER BY m LIMIT 100 PREHOOK: type: QUERY -POSTHOOK: query: -- WITH FILTER ON TIME -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT robot, floor_hour(`__time`), max(added) as m, sum(variation) FROM druid_table_1 WHERE floor_hour(`__time`) diff --git a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out index f1f8806..87166a7 100644 --- a/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out +++ b/ql/src/test/results/clientpositive/dynamic_rdd_cache.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- JOIN TEST - -EXPLAIN +PREHOOK: query: EXPLAIN FROM (SELECT src.* FROM src sort by key) X RIGHT OUTER JOIN @@ -11,9 +9,7 @@ JOIN ON (X.key = Z.key) SELECT sum(hash(Y.key,Y.value)) GROUP BY Y.key PREHOOK: type: QUERY -POSTHOOK: query: -- JOIN TEST - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM (SELECT src.* FROM src sort by key) X RIGHT OUTER JOIN @@ -375,15 +371,11 @@ POSTHOOK: query: DROP TABLE dest2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@dest2 POSTHOOK: Output: default@dest2 -PREHOOK: query: -- UNION TEST - -CREATE TABLE tmptable(key STRING, value INT) +PREHOOK: query: CREATE TABLE tmptable(key STRING, value INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- UNION TEST - -CREATE TABLE tmptable(key STRING, value INT) +POSTHOOK: query: CREATE TABLE tmptable(key STRING, value INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable @@ -809,15 +801,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- CWE TEST - -CREATE TABLE inv(w_warehouse_name STRING , w_warehouse_sk INT , stdev INT , d_moy INT , mean INT , cov INT , inv_quantity_on_hand INT) +PREHOOK: query: CREATE TABLE inv(w_warehouse_name STRING , w_warehouse_sk INT , stdev INT , d_moy INT , mean INT , cov INT , inv_quantity_on_hand INT) PREHOOK: 
type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inv -POSTHOOK: query: -- CWE TEST - -CREATE TABLE inv(w_warehouse_name STRING , w_warehouse_sk INT , stdev INT , d_moy INT , mean INT , cov INT , inv_quantity_on_hand INT) +POSTHOOK: query: CREATE TABLE inv(w_warehouse_name STRING , w_warehouse_sk INT , stdev INT , d_moy INT , mean INT , cov INT , inv_quantity_on_hand INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inv diff --git a/ql/src/test/results/clientpositive/dynpart_sort_opt_bucketing.q.out b/ql/src/test/results/clientpositive/dynpart_sort_opt_bucketing.q.out index 3992db7..e370c9d 100644 --- a/ql/src/test/results/clientpositive/dynpart_sort_opt_bucketing.q.out +++ b/ql/src/test/results/clientpositive/dynpart_sort_opt_bucketing.q.out @@ -152,13 +152,11 @@ fffee943d640a7714d09f9bd50dba08a9d0ebdd146655e4642c293a4396cb385\N\N2015-01-2 fffc8a89a8406ba3ed651fc9248fe9b2afb1ee672999ef75d2d9c42484435bca\N\N2015-01-21 fff06c6e0fd675ebeff09350e6b7a3900115f72341fd353e5e185e8983d10534002015-01-21 ffef4b7433ee6008e389a1a4121a82b828123864e563a2afe67dcf29e2b71591\N\N2015-01-21 -PREHOOK: query: -- disable sorted dynamic partition optimization to make sure the results are correct -drop table t1 +PREHOOK: query: drop table t1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- disable sorted dynamic partition optimization to make sure the results are correct -drop table t1 +POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/escape3.q.out b/ql/src/test/results/clientpositive/escape3.q.out index dc53583..06f307c 100644 --- a/ql/src/test/results/clientpositive/escape3.q.out +++ b/ql/src/test/results/clientpositive/escape3.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- with string -CREATE TABLE escape3_1 +PREHOOK: query: CREATE TABLE escape3_1 ( GERUND 
STRING, ABBREV STRING, @@ -11,8 +10,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@escape3_1 -POSTHOOK: query: -- with string -CREATE TABLE escape3_1 +POSTHOOK: query: CREATE TABLE escape3_1 ( GERUND STRING, ABBREV STRING, @@ -45,8 +43,7 @@ writ|ing MD 200 w|aiting |NC 300 seein|g TN| 400 runn|ing WV 500 -PREHOOK: query: -- with varchar -CREATE TABLE escape3_2 +PREHOOK: query: CREATE TABLE escape3_2 ( GERUND VARCHAR(10), ABBREV VARCHAR(3), @@ -58,8 +55,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@escape3_2 -POSTHOOK: query: -- with varchar -CREATE TABLE escape3_2 +POSTHOOK: query: CREATE TABLE escape3_2 ( GERUND VARCHAR(10), ABBREV VARCHAR(3), @@ -92,8 +88,7 @@ writ|ing MD 200 w|aiting |NC 300 seein|g TN| 400 runn|ing WV 500 -PREHOOK: query: -- with char -CREATE TABLE escape3_3 +PREHOOK: query: CREATE TABLE escape3_3 ( GERUND CHAR(10), ABBREV CHAR(3), @@ -105,8 +100,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@escape3_3 -POSTHOOK: query: -- with char -CREATE TABLE escape3_3 +POSTHOOK: query: CREATE TABLE escape3_3 ( GERUND CHAR(10), ABBREV CHAR(3), diff --git a/ql/src/test/results/clientpositive/escape_clusterby1.q.out b/ql/src/test/results/clientpositive/escape_clusterby1.q.out index fdd9897..ce4e478 100644 --- a/ql/src/test/results/clientpositive/escape_clusterby1.q.out +++ b/ql/src/test/results/clientpositive/escape_clusterby1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- escaped column names in cluster by are not working jira 3267 -explain +PREHOOK: query: explain select key, value from src cluster by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- escaped column names in cluster by are not working jira 3267 -explain +POSTHOOK: query: explain select key, value from src cluster by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git 
a/ql/src/test/results/clientpositive/escape_comments.q.out b/ql/src/test/results/clientpositive/escape_comments.q.out index ff5a1ed..5965c5e 100644 --- a/ql/src/test/results/clientpositive/escape_comments.q.out +++ b/ql/src/test/results/clientpositive/escape_comments.q.out @@ -11,13 +11,13 @@ POSTHOOK: query: use escape_comments_db POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:escape_comments_db PREHOOK: query: create table escape_comments_tbl1 -(col1 string comment 'a\nb\';') comment 'a\nb' +(col1 string comment 'a\nb\'\;') comment 'a\nb' partitioned by (p1 string comment 'a\nb') PREHOOK: type: CREATETABLE PREHOOK: Output: database:escape_comments_db PREHOOK: Output: escape_comments_db@escape_comments_tbl1 POSTHOOK: query: create table escape_comments_tbl1 -(col1 string comment 'a\nb\';') comment 'a\nb' +(col1 string comment 'a\nb\'\;') comment 'a\nb' partitioned by (p1 string comment 'a\nb') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:escape_comments_db diff --git a/ql/src/test/results/clientpositive/escape_crlf.q.out b/ql/src/test/results/clientpositive/escape_crlf.q.out index 3fc246a..8b5df8c 100644 --- a/ql/src/test/results/clientpositive/escape_crlf.q.out +++ b/ql/src/test/results/clientpositive/escape_crlf.q.out @@ -32,25 +32,21 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/escape_crlf.txt' OVERW POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@base_tab -PREHOOK: query: -- No crlf escaping -SELECT * FROM base_tab +PREHOOK: query: SELECT * FROM base_tab PREHOOK: type: QUERY PREHOOK: Input: default@base_tab #### A masked pattern was here #### -POSTHOOK: query: -- No crlf escaping -SELECT * FROM base_tab +POSTHOOK: query: SELECT * FROM base_tab POSTHOOK: type: QUERY POSTHOOK: Input: default@base_tab #### A masked pattern was here #### This\nis\rthe first\r\nmulti-line field field1-2 This\nis\rthe second\r\nmulti-line field field2-2 -PREHOOK: query: -- Crlf escaping -ALTER TABLE base_tab SET 
SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true') +PREHOOK: query: ALTER TABLE base_tab SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@base_tab PREHOOK: Output: default@base_tab -POSTHOOK: query: -- Crlf escaping -ALTER TABLE base_tab SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true') +POSTHOOK: query: ALTER TABLE base_tab SET SERDEPROPERTIES ('escape.delim'='\\', 'serialization.escape.crlf'='true') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: default@base_tab POSTHOOK: Output: default@base_tab @@ -70,13 +66,11 @@ This is the second multi-line field field2-2 -PREHOOK: query: -- Make sure intermediate serde works correctly -SELECT * FROM base_tab +PREHOOK: query: SELECT * FROM base_tab PREHOOK: type: QUERY PREHOOK: Input: default@base_tab #### A masked pattern was here #### -POSTHOOK: query: -- Make sure intermediate serde works correctly -SELECT * FROM base_tab +POSTHOOK: query: SELECT * FROM base_tab POSTHOOK: type: QUERY POSTHOOK: Input: default@base_tab #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/escape_distributeby1.q.out b/ql/src/test/results/clientpositive/escape_distributeby1.q.out index 770eacf..6226061 100644 --- a/ql/src/test/results/clientpositive/escape_distributeby1.q.out +++ b/ql/src/test/results/clientpositive/escape_distributeby1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- escaped column names in distribute by by are not working jira 3267 -explain +PREHOOK: query: explain select key, value from src distribute by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- escaped column names in distribute by by are not working jira 3267 -explain +POSTHOOK: query: explain select key, value from src distribute by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/escape_orderby1.q.out 
b/ql/src/test/results/clientpositive/escape_orderby1.q.out index 88ef43a..e2937cb 100644 --- a/ql/src/test/results/clientpositive/escape_orderby1.q.out +++ b/ql/src/test/results/clientpositive/escape_orderby1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- escaped column names in order by are not working jira 3267 -explain +PREHOOK: query: explain select key, value from src order by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- escaped column names in order by are not working jira 3267 -explain +POSTHOOK: query: explain select key, value from src order by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/escape_sortby1.q.out b/ql/src/test/results/clientpositive/escape_sortby1.q.out index de83be2..0ce9699 100644 --- a/ql/src/test/results/clientpositive/escape_sortby1.q.out +++ b/ql/src/test/results/clientpositive/escape_sortby1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- escaped column names in sort by are not working jira 3267 -explain +PREHOOK: query: explain select key, value from src sort by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- escaped column names in sort by are not working jira 3267 -explain +POSTHOOK: query: explain select key, value from src sort by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/exchange_partition3.q.out b/ql/src/test/results/clientpositive/exchange_partition3.q.out index b7c62e2..d351b09 100644 --- a/ql/src/test/results/clientpositive/exchange_partition3.q.out +++ b/ql/src/test/results/clientpositive/exchange_partition3.q.out @@ -62,13 +62,11 @@ POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@exchange_part_test2 ds=2013-04-05/hr=1 ds=2013-04-05/hr=2 -PREHOOK: query: -- This will exchange both partitions hr=1 and hr=2 -ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2 +PREHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE 
exchange_part_test2 PREHOOK: type: ALTERTABLE_EXCHANGEPARTITION PREHOOK: Input: default@exchange_part_test2 PREHOOK: Output: default@exchange_part_test1 -POSTHOOK: query: -- This will exchange both partitions hr=1 and hr=2 -ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2 +POSTHOOK: query: ALTER TABLE exchange_part_test1 EXCHANGE PARTITION (ds='2013-04-05') WITH TABLE exchange_part_test2 POSTHOOK: type: ALTERTABLE_EXCHANGEPARTITION POSTHOOK: Input: default@exchange_part_test2 POSTHOOK: Input: default@exchange_part_test2@ds=2013-04-05/hr=1 diff --git a/ql/src/test/results/clientpositive/exim_25_export_parentpath_has_inaccessible_children.q.out b/ql/src/test/results/clientpositive/exim_25_export_parentpath_has_inaccessible_children.q.out index 4d0aa66..06c38fe 100644 --- a/ql/src/test/results/clientpositive/exim_25_export_parentpath_has_inaccessible_children.q.out +++ b/ql/src/test/results/clientpositive/exim_25_export_parentpath_has_inaccessible_children.q.out @@ -6,17 +6,11 @@ POSTHOOK: query: create table t_exppath ( dep_id int) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t_exppath -PREHOOK: query: -- this test tests HIVE-10022, by showing that we can output to a directory that does not yet exist --- even if we do not have permissions to other subdirs of the parent dir - -load data local inpath "../../data/files/test.dat" into table t_exppath +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table t_exppath PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@t_exppath -POSTHOOK: query: -- this test tests HIVE-10022, by showing that we can output to a directory that does not yet exist --- even if we do not have permissions to other subdirs of the parent dir - -load data local inpath "../../data/files/test.dat" into table t_exppath +POSTHOOK: query: load data local inpath "../../data/files/test.dat" 
into table t_exppath POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t_exppath diff --git a/ql/src/test/results/clientpositive/explain_ddl.q.out b/ql/src/test/results/clientpositive/explain_ddl.q.out index 2b89f28..6da67e7 100644 --- a/ql/src/test/results/clientpositive/explain_ddl.q.out +++ b/ql/src/test/results/clientpositive/explain_ddl.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- This test is used for testing explain for DDL/DML statements - --- Create some views and tabels -CREATE VIEW V1 AS SELECT key, value from src +PREHOOK: query: CREATE VIEW V1 AS SELECT key, value from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@V1 -POSTHOOK: query: -- This test is used for testing explain for DDL/DML statements - --- Create some views and tabels -CREATE VIEW V1 AS SELECT key, value from src +POSTHOOK: query: CREATE VIEW V1 AS SELECT key, value from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/explain_dependency.q.out b/ql/src/test/results/clientpositive/explain_dependency.q.out index dbfc482..1b18a32 100644 --- a/ql/src/test/results/clientpositive/explain_dependency.q.out +++ b/ql/src/test/results/clientpositive/explain_dependency.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command - --- Create some views -CREATE VIEW V1 AS SELECT key, value from src +PREHOOK: query: CREATE VIEW V1 AS SELECT key, value from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@V1 -POSTHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command - --- Create some views -CREATE VIEW V1 AS SELECT key, value from src +POSTHOOK: query: CREATE VIEW V1 AS SELECT key, value from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default 
@@ -62,12 +56,10 @@ POSTHOOK: Input: default@v1 POSTHOOK: Input: default@v2 POSTHOOK: Output: database:default POSTHOOK: Output: default@V4 -PREHOOK: query: -- Simple select queries, union queries and join queries -EXPLAIN DEPENDENCY +PREHOOK: query: EXPLAIN DEPENDENCY SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- Simple select queries, union queries and join queries -EXPLAIN DEPENDENCY +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]} @@ -96,11 +88,9 @@ POSTHOOK: query: EXPLAIN DEPENDENCY SELECT S1.key, S2.value FROM src S1 JOIN srcpart S2 ON S1.key = S2.key WHERE ds IS NOT NULL POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]} -PREHOOK: query: -- With views -EXPLAIN DEPENDENCY SELECT * FROM V1 +PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V1 PREHOOK: type: QUERY -POSTHOOK: query: -- With views -EXPLAIN DEPENDENCY SELECT * FROM V1 +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V1 POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v1]"}],"input_partitions":[]} PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V2 @@ -118,16 +108,12 @@ 
PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM V4 POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@v4","tabletype":"VIRTUAL_VIEW"},{"tablename":"default@v1","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@v2","tabletype":"VIRTUAL_VIEW","tableParents":"[default@v4]"},{"tablename":"default@src","tabletype":"MANAGED_TABLE","tableParents":"[default@v4, default@v1]"},{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE","tableParents":"[default@v2]"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]} -PREHOOK: query: -- The table should show up in the explain dependency even if none --- of the partitions are selected. -CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' +PREHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:default PREHOOK: Output: default@V5 -POSTHOOK: query: -- The table should show up in the explain dependency even if none --- of the partitions are selected. 
-CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' +POSTHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/explain_dependency2.q.out b/ql/src/test/results/clientpositive/explain_dependency2.q.out index 20bed87..c2ee3f5 100644 --- a/ql/src/test/results/clientpositive/explain_dependency2.q.out +++ b/ql/src/test/results/clientpositive/explain_dependency2.q.out @@ -1,55 +1,31 @@ -PREHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command - --- select from a table which does not involve a map-reduce job -EXPLAIN DEPENDENCY SELECT * FROM src +PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- This test is used for testing EXPLAIN DEPENDENCY command - --- select from a table which does not involve a map-reduce job -EXPLAIN DEPENDENCY SELECT * FROM src +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM src POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}],"input_partitions":[]} -PREHOOK: query: -- select from a table which involves a map-reduce job -EXPLAIN DEPENDENCY SELECT count(*) FROM src +PREHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- select from a table which involves a map-reduce job -EXPLAIN DEPENDENCY SELECT count(*) FROM src +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM src POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@src","tabletype":"MANAGED_TABLE"}],"input_partitions":[]} -PREHOOK: query: -- select from a partitioned table which does not involve a map-reduce job --- and some partitions are being selected -EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null +PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null PREHOOK: type: QUERY -POSTHOOK: query: -- select from a partitioned 
table which does not involve a map-reduce job --- and some partitions are being selected -EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds is not null POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[{"partitionName":"default@srcpart@ds=2008-04-08/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-08/hr=12"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=11"},{"partitionName":"default@srcpart@ds=2008-04-09/hr=12"}]} -PREHOOK: query: -- select from a partitioned table which does not involve a map-reduce job --- and none of the partitions are being selected -EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1' +PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- select from a partitioned table which does not involve a map-reduce job --- and none of the partitions are being selected -EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1' +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM srcpart where ds = '1' POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]} -PREHOOK: query: -- select from a partitioned table which involves a map-reduce job --- and some partitions are being selected -EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null +PREHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null PREHOOK: type: QUERY -POSTHOOK: query: -- select from a partitioned table which involves a map-reduce job --- and some partitions are being selected -EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds is not null POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]} -PREHOOK: query: -- 
select from a partitioned table which involves a map-reduce job --- and none of the partitions are being selected -EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1' +PREHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- select from a partitioned table which involves a map-reduce job --- and none of the partitions are being selected -EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1' +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM srcpart where ds = '1' POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@srcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]} PREHOOK: query: create table tstsrcpart like srcpart @@ -60,17 +36,13 @@ POSTHOOK: query: create table tstsrcpart like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tstsrcpart -PREHOOK: query: -- select from a partitioned table with no partitions which does not involve a map-reduce job -EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null +PREHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null PREHOOK: type: QUERY -POSTHOOK: query: -- select from a partitioned table with no partitions which does not involve a map-reduce job -EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT * FROM tstsrcpart where ds is not null POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@tstsrcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]} -PREHOOK: query: -- select from a partitioned table with no partitions which involves a map-reduce job -EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null +PREHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null PREHOOK: type: QUERY -POSTHOOK: query: -- select from a partitioned table with no partitions which involves a map-reduce job -EXPLAIN DEPENDENCY SELECT 
count(*) FROM tstsrcpart where ds is not null +POSTHOOK: query: EXPLAIN DEPENDENCY SELECT count(*) FROM tstsrcpart where ds is not null POSTHOOK: type: QUERY {"input_tables":[{"tablename":"default@tstsrcpart","tabletype":"MANAGED_TABLE"}],"input_partitions":[]} diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out index 79a3050..90f7aca 100644 --- a/ql/src/test/results/clientpositive/explain_logical.q.out +++ b/ql/src/test/results/clientpositive/explain_logical.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- This test is used for testing EXPLAIN LOGICAL command - --- Create some views -CREATE VIEW V1 AS SELECT key, value from src +PREHOOK: query: CREATE VIEW V1 AS SELECT key, value from src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@V1 -POSTHOOK: query: -- This test is used for testing EXPLAIN LOGICAL command - --- Create some views -CREATE VIEW V1 AS SELECT key, value from src +POSTHOOK: query: CREATE VIEW V1 AS SELECT key, value from src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -62,12 +56,10 @@ POSTHOOK: Input: default@v1 POSTHOOK: Input: default@v2 POSTHOOK: Output: database:default POSTHOOK: Output: default@V4 -PREHOOK: query: -- Simple select queries, union queries and join queries -EXPLAIN LOGICAL +PREHOOK: query: EXPLAIN LOGICAL SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- Simple select queries, union queries and join queries -EXPLAIN LOGICAL +POSTHOOK: query: EXPLAIN LOGICAL SELECT key, count(1) FROM srcpart WHERE ds IS NOT NULL GROUP BY key POSTHOOK: type: QUERY LOGICAL PLAN: @@ -253,11 +245,9 @@ $hdt$_1:s2 outputColumnNames: _col0, _col2 Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE -PREHOOK: query: -- With views -EXPLAIN LOGICAL SELECT * FROM V1 +PREHOOK: query: 
EXPLAIN LOGICAL SELECT * FROM V1 PREHOOK: type: QUERY -POSTHOOK: query: -- With views -EXPLAIN LOGICAL SELECT * FROM V1 +POSTHOOK: query: EXPLAIN LOGICAL SELECT * FROM V1 POSTHOOK: type: QUERY LOGICAL PLAN: src @@ -462,16 +452,12 @@ $hdt$_2:src3 outputColumnNames: _col1, _col2, _col4 Statistics: Num rows: 4400 Data size: 46745 Basic stats: COMPLETE Column stats: NONE -PREHOOK: query: -- The table should show up in the explain logical even if none --- of the partitions are selected. -CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' +PREHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' PREHOOK: type: CREATEVIEW PREHOOK: Input: default@srcpart PREHOOK: Output: database:default PREHOOK: Output: default@V5 -POSTHOOK: query: -- The table should show up in the explain logical even if none --- of the partitions are selected. -CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' +POSTHOOK: query: CREATE VIEW V5 as SELECT * FROM srcpart where ds = '10' POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@srcpart POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/explain_rearrange.q.out b/ql/src/test/results/clientpositive/explain_rearrange.q.out index 7c8458a..2e5f9e4 100644 --- a/ql/src/test/results/clientpositive/explain_rearrange.q.out +++ b/ql/src/test/results/clientpositive/explain_rearrange.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- query from auto_sortmerge_join_9.q - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- query from auto_sortmerge_join_9.q - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS 
POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 @@ -18,9 +14,7 @@ POSTHOOK: query: CREATE TABLE tbl2(key int, value string) CLUSTERED BY (key) SOR POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl2 -PREHOOK: query: -- default behavior - -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -38,9 +32,7 @@ join on src1.key = src2.key order by src1.key, src1.cnt1, src2.cnt1 PREHOOK: type: QUERY -POSTHOOK: query: -- default behavior - -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -291,9 +283,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- changes id only - -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -311,9 +301,7 @@ join on src1.key = src2.key order by src1.key, src1.cnt1, src2.cnt1 PREHOOK: type: QUERY -POSTHOOK: query: -- changes id only - -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -564,9 +552,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- assign ids in traverse order - -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -584,9 +570,7 @@ join on src1.key = src2.key order by src1.key, src1.cnt1, src2.cnt1 PREHOOK: type: QUERY -POSTHOOK: query: -- assign ids in traverse order - -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -837,9 +821,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- assign ids in execution order - -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -857,9 +839,7 @@ join on src1.key = src2.key order by src1.key, src1.cnt1, src2.cnt1 PREHOOK: type: QUERY 
-POSTHOOK: query: -- assign ids in execution order - -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from diff --git a/ql/src/test/results/clientpositive/expr_cached.q.out b/ql/src/test/results/clientpositive/expr_cached.q.out index 9f0b651..454ca33 100644 --- a/ql/src/test/results/clientpositive/expr_cached.q.out +++ b/ql/src/test/results/clientpositive/expr_cached.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- should return a value -select * from src tablesample (1 rows) where length(key) <> reverse(key) +PREHOOK: query: select * from src tablesample (1 rows) where length(key) <> reverse(key) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- should return a value -select * from src tablesample (1 rows) where length(key) <> reverse(key) +POSTHOOK: query: select * from src tablesample (1 rows) where length(key) <> reverse(key) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/extract.q.out b/ql/src/test/results/clientpositive/extract.q.out index aee4427..42178e4 100644 --- a/ql/src/test/results/clientpositive/extract.q.out +++ b/ql/src/test/results/clientpositive/extract.q.out @@ -71,13 +71,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@extract_udf #### A masked pattern was here #### 6 -PREHOOK: query: -- new syntax -explain +PREHOOK: query: explain select extract(day from t) from extract_udf PREHOOK: type: QUERY -POSTHOOK: query: -- new syntax -explain +POSTHOOK: query: explain select extract(day from t) from extract_udf POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out index ef20f4e..e900edd 100644 --- a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out +++ 
b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out @@ -84,11 +84,9 @@ POSTHOOK: Input: default@loc_orc_1d # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment state string 0 3 0.75 2 from deserializer -PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain extended select state from loc_orc_1d +PREHOOK: query: explain extended select state from loc_orc_1d PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain extended select state from loc_orc_1d +POSTHOOK: query: explain extended select state from loc_orc_1d POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -195,13 +193,9 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL --- basicStatState: COMPLETE colStatState: PARTIAL -explain extended select state,locid from loc_orc_1d +PREHOOK: query: explain extended select state,locid from loc_orc_1d PREHOOK: type: QUERY -POSTHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. 
Hence colStatState reports PARTIAL --- basicStatState: COMPLETE colStatState: PARTIAL -explain extended select state,locid from loc_orc_1d +POSTHOOK: query: explain extended select state,locid from loc_orc_1d POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out index 995fdf1..8a7341f 100644 --- a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out +++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out @@ -101,11 +101,9 @@ POSTHOOK: Input: default@loc_orc_1d # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment state string 0 6 3.0 3 from deserializer -PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain extended select state from loc_orc_1d +PREHOOK: query: explain extended select state from loc_orc_1d PREHOOK: type: QUERY -POSTHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL -explain extended select state from loc_orc_1d +POSTHOOK: query: explain extended select state from loc_orc_1d POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -298,13 +296,9 @@ STAGE PLANS: Statistics: Num rows: 20 Data size: 1780 Basic stats: COMPLETE Column stats: PARTIAL ListSink -PREHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL --- basicStatState: COMPLETE colStatState: PARTIAL -explain extended select state,locid from loc_orc_1d +PREHOOK: query: explain extended select state,locid from loc_orc_1d PREHOOK: type: QUERY -POSTHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. 
Hence colStatState reports PARTIAL --- basicStatState: COMPLETE colStatState: PARTIAL -explain extended select state,locid from loc_orc_1d +POSTHOOK: query: explain extended select state,locid from loc_orc_1d POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/filter_join_breaktask.q.out index 12fb29a..6571822 100644 --- a/ql/src/test/results/clientpositive/filter_join_breaktask.q.out +++ b/ql/src/test/results/clientpositive/filter_join_breaktask.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@filter_join_breaktask -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@filter_join_breaktask diff --git a/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out b/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out index af85af9..6a2396e 100644 --- a/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out +++ b/ql/src/test/results/clientpositive/filter_join_breaktask2.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +PREHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - -create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +POSTHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/filter_numeric.q.out b/ql/src/test/results/clientpositive/filter_numeric.q.out index d1965df..f2a9b5e 100644 --- a/ql/src/test/results/clientpositive/filter_numeric.q.out +++ b/ql/src/test/results/clientpositive/filter_numeric.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_AND_HASH_QUERY_RESULTS - -create table partint(key string, value string) partitioned by (ds string, hr int) +PREHOOK: query: create table partint(key string, value string) partitioned by (ds string, hr int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partint -POSTHOOK: query: -- SORT_AND_HASH_QUERY_RESULTS - -create table partint(key string, value string) partitioned by (ds string, hr int) +POSTHOOK: query: create table partint(key string, value string) partitioned by (ds string, hr int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partint diff --git a/ql/src/test/results/clientpositive/floor_time.q.out b/ql/src/test/results/clientpositive/floor_time.q.out index f0cb324..a09e85a 100644 --- a/ql/src/test/results/clientpositive/floor_time.q.out +++ b/ql/src/test/results/clientpositive/floor_time.q.out @@ -82,13 +82,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@extract_udf #### A masked pattern was here #### 2011-05-06 00:00:00 -PREHOOK: query: -- new syntax -explain +PREHOOK: query: explain select floor(t to day) from extract_udf PREHOOK: type: QUERY -POSTHOOK: query: -- new syntax -explain +POSTHOOK: query: explain select floor(t to day) from extract_udf POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out 
b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out index 048ab96..66ee139 100644 --- a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM src a FULL OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN EXTENDED SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM src a FULL OUTER JOIN diff --git a/ql/src/test/results/clientpositive/groupby1.q.out b/ql/src/test/results/clientpositive/groupby1.q.out index 785c078..46e09dd 100644 --- a/ql/src/test/results/clientpositive/groupby1.q.out +++ b/ql/src/test/results/clientpositive/groupby1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g1 diff --git a/ql/src/test/results/clientpositive/groupby10.q.out b/ql/src/test/results/clientpositive/groupby10.q.out index 5297107..66832b0 100644 --- a/ql/src/test/results/clientpositive/groupby10.q.out +++ b/ql/src/test/results/clientpositive/groupby10.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, val1 INT, val2 INT) +PREHOOK: query: CREATE TABLE dest1(key INT, val1 INT, val2 INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: 
query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, val1 INT, val2 INT) +POSTHOOK: query: CREATE TABLE dest1(key INT, val1 INT, val2 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 @@ -524,16 +520,12 @@ POSTHOOK: Input: default@dest2 66 66 66 86 86 86 98 98 98 -PREHOOK: query: -- HIVE-3852 Multi-groupby optimization fails when same distinct column is used twice or more - -EXPLAIN +PREHOOK: query: EXPLAIN FROM INPUT INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-3852 Multi-groupby optimization fails when same distinct column is used twice or more - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM INPUT INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key diff --git a/ql/src/test/results/clientpositive/groupby11.q.out b/ql/src/test/results/clientpositive/groupby11.q.out index 86568e8..1d0e86a 100644 --- a/ql/src/test/results/clientpositive/groupby11.q.out +++ b/ql/src/test/results/clientpositive/groupby11.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string) +PREHOOK: query: CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE dest1(key STRING, val1 INT, val2 INT) 
partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby1_limit.q.out b/ql/src/test/results/clientpositive/groupby1_limit.q.out index aacd23c..78a49eb 100644 --- a/ql/src/test/results/clientpositive/groupby1_limit.q.out +++ b/ql/src/test/results/clientpositive/groupby1_limit.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby1_map.q.out b/ql/src/test/results/clientpositive/groupby1_map.q.out index 7cdf240..cc985a5 100644 --- a/ql/src/test/results/clientpositive/groupby1_map.q.out +++ b/ql/src/test/results/clientpositive/groupby1_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out b/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out index 7cdf240..cc985a5 100644 --- 
a/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out +++ b/ql/src/test/results/clientpositive/groupby1_map_nomap.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby1_map_skew.q.out b/ql/src/test/results/clientpositive/groupby1_map_skew.q.out index be7eeca..116744a 100644 --- a/ql/src/test/results/clientpositive/groupby1_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby1_map_skew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby1_noskew.q.out b/ql/src/test/results/clientpositive/groupby1_noskew.q.out index ef1fd79..98c0d3c 100644 --- a/ql/src/test/results/clientpositive/groupby1_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby1_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE 
dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g1 diff --git a/ql/src/test/results/clientpositive/groupby2_map.q.out b/ql/src/test/results/clientpositive/groupby2_map.q.out index c157ba7..0dcd810 100644 --- a/ql/src/test/results/clientpositive/groupby2_map.q.out +++ b/ql/src/test/results/clientpositive/groupby2_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out index afbcb7f..64477db 100644 --- a/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/groupby2_map_multi_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 @@ -112,15 +108,11 @@ POSTHOOK: Input: default@dest1 7 6 7735.0 447 10 8 8 8762.0 595 10 9 7 91047.0 577 12 -PREHOOK: query: -- HIVE-5560 when group by key is used in distinct funtion, invalid result are returned - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-5560 when group by key is used in distinct funtion, invalid result are returned - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby2_noskew.q.out b/ql/src/test/results/clientpositive/groupby2_noskew.q.out index d3aff42..5192db3 100644 --- a/ql/src/test/results/clientpositive/groupby2_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby2_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) 
STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git a/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out index 2527fcd..1c24213 100644 --- a/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/groupby2_noskew_multi_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git a/ql/src/test/results/clientpositive/groupby4.q.out b/ql/src/test/results/clientpositive/groupby4.q.out index 55313fa..3f77e47 100644 --- a/ql/src/test/results/clientpositive/groupby4.q.out +++ b/ql/src/test/results/clientpositive/groupby4.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby4_noskew.q.out 
b/ql/src/test/results/clientpositive/groupby4_noskew.q.out index d1936da..c7db0d7 100644 --- a/ql/src/test/results/clientpositive/groupby4_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby4_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby5.q.out b/ql/src/test/results/clientpositive/groupby5.q.out index 946d685..d6efd2c 100644 --- a/ql/src/test/results/clientpositive/groupby5.q.out +++ b/ql/src/test/results/clientpositive/groupby5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby5_noskew.q.out b/ql/src/test/results/clientpositive/groupby5_noskew.q.out index 8de96dc..3c1688c 100644 --- a/ql/src/test/results/clientpositive/groupby5_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby5_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE 
dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby6.q.out b/ql/src/test/results/clientpositive/groupby6.q.out index d8cb2ac..b790224 100644 --- a/ql/src/test/results/clientpositive/groupby6.q.out +++ b/ql/src/test/results/clientpositive/groupby6.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby6_map.q.out b/ql/src/test/results/clientpositive/groupby6_map.q.out index b307eba..4ba3772 100644 --- a/ql/src/test/results/clientpositive/groupby6_map.q.out +++ b/ql/src/test/results/clientpositive/groupby6_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git 
a/ql/src/test/results/clientpositive/groupby6_map_skew.q.out b/ql/src/test/results/clientpositive/groupby6_map_skew.q.out index c5af47d..5141c0d 100644 --- a/ql/src/test/results/clientpositive/groupby6_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby6_map_skew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby6_noskew.q.out b/ql/src/test/results/clientpositive/groupby6_noskew.q.out index 43c7400..fd796c7 100644 --- a/ql/src/test/results/clientpositive/groupby6_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby6_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby7.q.out b/ql/src/test/results/clientpositive/groupby7.q.out index bb5f62c..ee0153a 100644 --- a/ql/src/test/results/clientpositive/groupby7.q.out +++ b/ql/src/test/results/clientpositive/groupby7.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: 
query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby7_map.q.out b/ql/src/test/results/clientpositive/groupby7_map.q.out index 3e271c9..29e3113 100644 --- a/ql/src/test/results/clientpositive/groupby7_map.q.out +++ b/ql/src/test/results/clientpositive/groupby7_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out index 49e10c9..377f275 100644 --- a/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/groupby7_map_multi_single_reducer.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, 
value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby7_map_skew.q.out b/ql/src/test/results/clientpositive/groupby7_map_skew.q.out index 1477952..bd0f347 100644 --- a/ql/src/test/results/clientpositive/groupby7_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby7_map_skew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby7_noskew.q.out b/ql/src/test/results/clientpositive/groupby7_noskew.q.out index a40918c..71321f4 100644 --- a/ql/src/test/results/clientpositive/groupby7_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby7_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git 
a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out index 44b664f..8181d8b 100644 --- a/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/groupby7_noskew_multi_single_reducer.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby8.q.out b/ql/src/test/results/clientpositive/groupby8.q.out index 0b13817..abf7256 100644 --- a/ql/src/test/results/clientpositive/groupby8.q.out +++ b/ql/src/test/results/clientpositive/groupby8.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby8_map.q.out b/ql/src/test/results/clientpositive/groupby8_map.q.out index db472e9..48b8b34 100644 --- a/ql/src/test/results/clientpositive/groupby8_map.q.out +++ 
b/ql/src/test/results/clientpositive/groupby8_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby8_map_skew.q.out b/ql/src/test/results/clientpositive/groupby8_map_skew.q.out index 3ab8a4c..3b63a40 100644 --- a/ql/src/test/results/clientpositive/groupby8_map_skew.q.out +++ b/ql/src/test/results/clientpositive/groupby8_map_skew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby8_noskew.q.out b/ql/src/test/results/clientpositive/groupby8_noskew.q.out index db472e9..48b8b34 100644 --- a/ql/src/test/results/clientpositive/groupby8_noskew.q.out +++ b/ql/src/test/results/clientpositive/groupby8_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE 
PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby9.q.out b/ql/src/test/results/clientpositive/groupby9.q.out index 9c5b3b3..c840df8 100644 --- a/ql/src/test/results/clientpositive/groupby9.q.out +++ b/ql/src/test/results/clientpositive/groupby9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby_complex_types.q.out b/ql/src/test/results/clientpositive/groupby_complex_types.q.out index 4b561a7..b16a4ad 100644 --- a/ql/src/test/results/clientpositive/groupby_complex_types.q.out +++ b/ql/src/test/results/clientpositive/groupby_complex_types.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: 
Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out index c3d0e52..caa5395 100644 --- a/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/groupby_complex_types_multi_single_reducer.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/groupby_cube1.q.out b/ql/src/test/results/clientpositive/groupby_cube1.q.out index 9eea534..0486b68 100644 --- a/ql/src/test/results/clientpositive/groupby_cube1.q.out +++ b/ql/src/test/results/clientpositive/groupby_cube1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out 
b/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out index fad93c9..a721b7f 100644 --- a/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out +++ b/ql/src/test/results/clientpositive/groupby_distinct_samekey.q.out @@ -1,24 +1,16 @@ -PREHOOK: query: -- This test covers HIVE-2332 - -create table t1 (int1 int, int2 int, str1 string, str2 string) +PREHOOK: query: create table t1 (int1 int, int2 int, str1 string, str2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- This test covers HIVE-2332 - -create table t1 (int1 int, int2 int, str1 string, str2 string) +POSTHOOK: query: create table t1 (int1 int, int2 int, str1 string, str2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 -PREHOOK: query: --disabled RS-dedup for keeping intention of test - -insert into table t1 select cast(key as int), cast(key as int), value, value from src where key < 6 +PREHOOK: query: insert into table t1 select cast(key as int), cast(key as int), value, value from src where key < 6 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@t1 -POSTHOOK: query: --disabled RS-dedup for keeping intention of test - -insert into table t1 select cast(key as int), cast(key as int), value, value from src where key < 6 +POSTHOOK: query: insert into table t1 select cast(key as int), cast(key as int), value, value from src where key < 6 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out index c305bfd..915e6e1 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_id3.q.out @@ -14,9 +14,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt POSTHOOK: type: LOAD #### A 
masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT key, value, GROUPING__ID, count(*) +PREHOOK: query: SELECT key, value, GROUPING__ID, count(*) FROM T1 GROUP BY key, value GROUPING SETS ((), (key)) @@ -24,9 +22,7 @@ HAVING GROUPING__ID = 1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT key, value, GROUPING__ID, count(*) +POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*) FROM T1 GROUP BY key, value GROUPING SETS ((), (key)) diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out index 39a9e6c..b4f8ce7 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets2.q.out @@ -14,12 +14,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INT POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- Since 4 grouping sets would be generated for the query below, an additional MR job should be created -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a, b, count(*) from T1 group by a, b with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Since 4 grouping sets would be generated for the query below, an additional MR job should be created -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a, b, count(*) from T1 group by a, b with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out index 8428631..67cbdcd 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets3.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- In this test, 2 files are loaded into table T1. 
The data contains rows with the same value of a and b, --- with different number of rows for a and b in each file. Since bucketizedHiveInputFormat is used, --- this tests that the aggregate function stores the partial aggregate state correctly even if an --- additional MR job is created for processing the grouping sets. -CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- In this test, 2 files are loaded into table T1. The data contains rows with the same value of a and b, --- with different number of rows for a and b in each file. Since bucketizedHiveInputFormat is used, --- this tests that the aggregate function stores the partial aggregate state correctly even if an --- additional MR job is created for processing the grouping sets. -CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 @@ -30,16 +22,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets2.txt' IN POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- The query below will execute in a single MR job, since 4 rows are generated per input row --- (cube of a,b will lead to (a,b), (a, null), (null, b) and (null, null) and --- hive.new.job.grouping.set.cardinality is more than 4. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT a, b, avg(c), count(*) from T1 group by a, b with cube PREHOOK: type: QUERY -POSTHOOK: query: -- The query below will execute in a single MR job, since 4 rows are generated per input row --- (cube of a,b will lead to (a,b), (a, null), (null, b) and (null, null) and --- hive.new.job.grouping.set.cardinality is more than 4. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a, b, avg(c), count(*) from T1 group by a, b with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -178,14 +164,10 @@ NULL 3 5.0 2 5 1 2.0 1 8 NULL 1.0 2 8 1 1.0 2 -PREHOOK: query: -- The query below will execute in 2 MR jobs, since hive.new.job.grouping.set.cardinality is set to 2. --- The partial aggregation state should be maintained correctly across MR jobs. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a, b, avg(c), count(*) from T1 group by a, b with cube PREHOOK: type: QUERY -POSTHOOK: query: -- The query below will execute in 2 MR jobs, since hive.new.job.grouping.set.cardinality is set to 2. --- The partial aggregation state should be maintained correctly across MR jobs. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a, b, avg(c), count(*) from T1 group by a, b with cube POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out index f688da3..5884b54 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets4.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- Set merging to false above to make the explain more readable - -CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- Set merging to false above to make the explain more readable - -CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 @@ -22,16 +14,14 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INT POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- This tests that cubes and rollups work fine inside sub-queries. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 join (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a PREHOOK: type: QUERY -POSTHOOK: query: -- This tests that cubes and rollups work fine inside sub-queries. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 join @@ -347,18 +337,14 @@ POSTHOOK: Input: default@t1 2 NULL 2 2 2 1 2 NULL 2 2 3 1 2 NULL 2 2 NULL 2 -PREHOOK: query: -- Since 4 grouping sets would be generated for each sub-query, an additional MR job should be created --- for each of them -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 join (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq2 on subq1.a = subq2.a PREHOOK: type: QUERY -POSTHOOK: query: -- Since 4 grouping sets would be generated for each sub-query, an additional MR job should be created --- for each of them -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT a, b, count(*) from T1 where a < 3 group by a, b with cube) subq1 join diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out index de019e3..166f110 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Set merging to false above to make the explain more readable - -CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- Set merging to false above to make the explain more readable - -CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(a STRING, b STRING, c STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' ' STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default POSTHOOK: Output: default@T1 @@ -18,13 +14,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INT POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- This tests that cubes and rollups work fine where the source is a sub-query -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a, b, count(*) FROM (SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube PREHOOK: type: QUERY -POSTHOOK: query: -- This tests that cubes and rollups work fine where the source is a sub-query -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a, b, count(*) FROM (SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube POSTHOOK: type: QUERY @@ -223,13 +217,11 @@ NULL 3 1 5 2 1 8 NULL 1 8 1 1 -PREHOOK: query: -- Since 4 grouping sets would be generated for the cube, an additional MR job should be created -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a, b, count(*) FROM (SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube PREHOOK: type: QUERY -POSTHOOK: query: -- Since 4 grouping sets would be generated for the cube, an additional MR job should be created -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a, b, count(*) FROM (SELECT a, b, count(1) from T1 group by a, b) subq1 group by a, b with cube POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out index 8166240..16f0871 100644 --- a/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out +++ b/ql/src/test/results/clientpositive/groupby_grouping_sets6.q.out @@ -14,14 +14,12 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/grouping_sets.txt' INT POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- This filter is not pushed down -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a, b FROM (SELECT a, b from T1 group by a, b grouping sets ( 
(a,b),a )) res WHERE res.a=5 PREHOOK: type: QUERY -POSTHOOK: query: -- This filter is not pushed down -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a, b FROM (SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 @@ -85,14 +83,12 @@ POSTHOOK: Input: default@t1 #### A masked pattern was here #### 5 NULL 5 2 -PREHOOK: query: -- This filter is pushed down through aggregate with grouping sets by Calcite -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a, b FROM (SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 PREHOOK: type: QUERY -POSTHOOK: query: -- This filter is pushed down through aggregate with grouping sets by Calcite -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a, b FROM (SELECT a, b from T1 group by a, b grouping sets ( (a,b),a )) res WHERE res.a=5 diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out index 24bf7a6..d88a906 100644 --- a/ql/src/test/results/clientpositive/groupby_map_ppr.q.out +++ b/ql/src/test/results/clientpositive/groupby_map_ppr.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out index c3cb7fb..3f606c9 100644 --- a/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out +++ 
b/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out b/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out index c78cfe5..5f02b04 100644 --- a/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out +++ b/ql/src/test/results/clientpositive/groupby_multi_insert_common_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table dest1(key int, cnt int) +PREHOOK: query: create table dest1(key int, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table dest1(key int, cnt int) +POSTHOOK: query: create table dest1(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out index f5c7c7f..a05c245 100644 --- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key 
STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out index 972ed51..012b211 100644 --- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out +++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out @@ -14,16 +14,12 @@ POSTHOOK: query: CREATE TABLE dest_g3(key STRING, c1 INT, c2 INT) STORED AS TEXT POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1) diff --git 
a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out index c5488de..e41d9ef 100644 --- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out +++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- HIVE-3849 Aliased column in where clause for multi-groupby single reducer cannot be resolved - --- SORT_QUERY_RESULTS - -create table e1 (key string, count int) +PREHOOK: query: create table e1 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@e1 -POSTHOOK: query: -- HIVE-3849 Aliased column in where clause for multi-groupby single reducer cannot be resolved - --- SORT_QUERY_RESULTS - -create table e1 (key string, count int) +POSTHOOK: query: create table e1 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 diff --git a/ql/src/test/results/clientpositive/groupby_position.q.out b/ql/src/test/results/clientpositive/groupby_position.q.out index 4276494..689aced 100644 --- a/ql/src/test/results/clientpositive/groupby_position.q.out +++ b/ql/src/test/results/clientpositive/groupby_position.q.out @@ -14,16 +14,12 @@ POSTHOOK: query: CREATE TABLE testTable2(key INT, val1 STRING, val2 STRING) STOR POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@testTable2 -PREHOOK: query: -- Position Alias in GROUP BY and ORDER BY - -EXPLAIN +PREHOOK: query: EXPLAIN FROM SRC INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1 INSERT OVERWRITE TABLE testTable2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1, 2 PREHOOK: type: QUERY -POSTHOOK: query: -- Position Alias in GROUP BY and ORDER BY - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM SRC INSERT OVERWRITE TABLE 
testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1 INSERT OVERWRITE TABLE testTable2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1, 2 @@ -402,16 +398,12 @@ POSTHOOK: Input: default@testtable2 17 val_17 1 18 val_18 1 19 val_19 1 -PREHOOK: query: -- Position Alias in subquery - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT t.key, t.value FROM (SELECT b.key as key, count(1) as value FROM src b WHERE b.key <= 20 GROUP BY 1) t ORDER BY 2 DESC, 1 ASC PREHOOK: type: QUERY -POSTHOOK: query: -- Position Alias in subquery - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT t.key, t.value FROM (SELECT b.key as key, count(1) as value FROM src b WHERE b.key <= 20 GROUP BY 1) t ORDER BY 2 DESC, 1 ASC diff --git a/ql/src/test/results/clientpositive/groupby_ppd.q.out b/ql/src/test/results/clientpositive/groupby_ppd.q.out index 7a61c3d..024ec6d 100644 --- a/ql/src/test/results/clientpositive/groupby_ppd.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppd.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- see HIVE-2382 -create table invites (id int, foo int, bar int) +PREHOOK: query: create table invites (id int, foo int, bar int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@invites -POSTHOOK: query: -- see HIVE-2382 -create table invites (id int, foo int, bar int) +POSTHOOK: query: create table invites (id int, foo int, bar int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@invites diff --git a/ql/src/test/results/clientpositive/groupby_ppr.q.out b/ql/src/test/results/clientpositive/groupby_ppr.q.out index a4e9ff3..7f4b5f6 100644 --- a/ql/src/test/results/clientpositive/groupby_ppr.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppr.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, 
c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out index 6595196..695d18e 100644 --- a/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/groupby_rollup1.q.out index 5437315..5fd011e 100644 --- a/ql/src/test/results/clientpositive/groupby_rollup1.q.out +++ b/ql/src/test/results/clientpositive/groupby_rollup1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/groupby_sort_10.q.out b/ql/src/test/results/clientpositive/groupby_sort_10.q.out index 9b8d388..ff75a2a 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_10.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_10.q.out @@ -1,36 +1,28 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') SELECT * from src where key = 0 or key = 11 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') SELECT * from src where key = 0 or key = 11 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@t1@ds=1 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The plan is converted to a map-side plan -EXPLAIN select distinct key from T1 +PREHOOK: query: EXPLAIN select distinct key from T1 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is converted to a map-side plan -EXPLAIN select distinct key from T1 +POSTHOOK: query: EXPLAIN select distinct key from T1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -78,27 +70,21 @@ POSTHOOK: Input: default@t1@ds=1 #### A masked pattern was here #### 0 11 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='2') +PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='2') SELECT * from src where key = 0 or key = 11 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@t1@ds=2 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='2') +POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='2') SELECT * from src where key = 0 or key = 11 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@t1@ds=2 POSTHOOK: Lineage: t1 PARTITION(ds=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: t1 PARTITION(ds=2).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The plan is not converted to a map-side, since although the sorting columns and grouping --- columns match, the user is querying multiple input partitions -EXPLAIN select distinct key from T1 +PREHOOK: query: EXPLAIN select distinct key from T1 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is not converted to a map-side, since although the sorting columns and grouping --- columns match, the user is querying multiple input partitions -EXPLAIN select distinct key from T1 +POSTHOOK: query: EXPLAIN select distinct key from T1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is 
a root stage diff --git a/ql/src/test/results/clientpositive/groupby_sort_11.q.out b/ql/src/test/results/clientpositive/groupby_sort_11.q.out index f49c53f..2b3bf4a 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_11.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_11.q.out @@ -8,25 +8,21 @@ CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') SELECT * from src where key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') +POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') SELECT * from src where key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@t1@ds=1 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The plan is optimized to perform partial aggregation on the mapper -EXPLAIN select count(distinct key) from T1 +PREHOOK: query: EXPLAIN select count(distinct key) from T1 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is optimized to perform partial aggregation on the mapper -EXPLAIN select count(distinct key) from T1 +POSTHOOK: query: EXPLAIN select count(distinct key) from T1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -86,11 +82,9 @@ POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t1@ds=1 #### A masked pattern was here #### 6 -PREHOOK: query: -- The plan is optimized to perform partial aggregation on the mapper -EXPLAIN select count(distinct key), count(1), 
count(key), sum(distinct key) from T1 +PREHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is optimized to perform partial aggregation on the mapper -EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 +POSTHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -150,11 +144,9 @@ POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t1@ds=1 #### A masked pattern was here #### 6 10 10 28.0 -PREHOOK: query: -- The plan is not changed in the presence of a grouping key -EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key +PREHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is not changed in the presence of a grouping key -EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key +POSTHOOK: query: EXPLAIN select count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -225,11 +217,9 @@ POSTHOOK: Input: default@t1@ds=1 1 3 3 5.0 1 1 1 8.0 1 1 1 9.0 -PREHOOK: query: -- The plan is not changed in the presence of a grouping key -EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key +PREHOOK: query: EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is not changed in the presence of a grouping key -EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 group by key +POSTHOOK: query: EXPLAIN select key, count(distinct key), count(1), count(key), sum(distinct key) from T1 
group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -296,11 +286,9 @@ POSTHOOK: Input: default@t1@ds=1 5 1 3 3 5.0 8 1 1 1 8.0 9 1 1 1 9.0 -PREHOOK: query: -- The plan is not changed in the presence of a grouping key expression -EXPLAIN select count(distinct key+key) from T1 +PREHOOK: query: EXPLAIN select count(distinct key+key) from T1 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is not changed in the presence of a grouping key expression -EXPLAIN select count(distinct key+key) from T1 +POSTHOOK: query: EXPLAIN select count(distinct key+key) from T1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -416,11 +404,9 @@ POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t1@ds=1 #### A masked pattern was here #### 1 -PREHOOK: query: -- no plan change if map aggr is turned off -EXPLAIN select count(distinct key) from T1 +PREHOOK: query: EXPLAIN select count(distinct key) from T1 PREHOOK: type: QUERY -POSTHOOK: query: -- no plan change if map aggr is turned off -EXPLAIN select count(distinct key) from T1 +POSTHOOK: query: EXPLAIN select count(distinct key) from T1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out index 6572f6c..c4bfc25 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE 
TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -22,13 +16,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ -42,15 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key POSTHOOK: type: QUERY @@ -418,13 +406,11 @@ POSTHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, 
count(1) FROM T1 GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val POSTHOOK: type: QUERY @@ -615,13 +601,11 @@ POSTHOOK: Input: default@outputtbl2 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key POSTHOOK: type: QUERY @@ -981,13 +965,11 @@ POSTHOOK: Input: default@outputtbl1 3 1 7 1 8 2 -PREHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k POSTHOOK: type: QUERY @@ -1355,15 +1337,11 @@ POSTHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl3 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a 
map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key POSTHOOK: type: QUERY @@ -1732,13 +1710,11 @@ POSTHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt in POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl4 -PREHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val POSTHOOK: type: QUERY @@ -1930,13 +1906,11 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 POSTHOOK: type: QUERY @@ -2126,21 +2100,13 @@ POSTHOOK: Input: default@outputtbl3 3 4 1 7 8 1 8 9 2 -PREHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key + key, sum(cnt) from (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 group by key + 
key PREHOOK: type: QUERY -POSTHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key + key, sum(cnt) from (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -2345,8 +2311,7 @@ POSTHOOK: Input: default@outputtbl1 2 1 4 1 6 1 -PREHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -2354,8 +2319,7 @@ SELECT key, count(1) FROM T1 GROUP BY key SELECT key, count(1) FROM T1 GROUP BY key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -2785,8 +2749,7 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -2794,8 +2757,7 @@ SELECT key, count(1) FROM T1 GROUP BY key SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -3344,8 +3306,7 @@ POSTHOOK: Input: default@outputtbl1 6 1 7 1 8 2 -PREHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT subq1.key, subq1.cnt+subq2.cnt FROM (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -3353,8 +3314,7 @@ JOIN (SELECT 
key, count(1) as cnt FROM T1 GROUP BY key) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT subq1.key, subq1.cnt+subq2.cnt FROM (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -3588,16 +3548,14 @@ POSTHOOK: Input: default@outputtbl1 3 2 7 2 8 4 -PREHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM (SELECT key, count(1) FROM T1 GROUP BY key) subq1 JOIN (SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM (SELECT key, count(1) FROM T1 GROUP BY key) subq1 JOIN @@ -3883,25 +3841,21 @@ CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T2 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t2 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t2 POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED +PREHOOK: 
query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T2 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T2 GROUP BY key POSTHOOK: type: QUERY @@ -4091,15 +4045,11 @@ POSTHOOK: Input: default@outputtbl1 3 1 7 1 8 2 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val POSTHOOK: type: QUERY @@ -4470,15 +4420,11 @@ POSTHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 i POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl5 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 POSTHOOK: type: QUERY @@ -4844,15 +4790,13 @@ POSTHOOK: Input: default@outputtbl5 7 1 17 
2 1 8 1 18 2 1 8 1 28 2 1 -PREHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T2)subq group by key, constant, val PREHOOK: type: QUERY -POSTHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T2)subq @@ -5221,8 +5165,7 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( @@ -5231,8 +5174,7 @@ SELECT key, constant as constant2, val, 2 as constant3 from )subq2 group by key, constant3, val PREHOOK: type: QUERY -POSTHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( @@ -5781,14 +5723,12 @@ POSTHOOK: Input: default@dest2 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- multi-table insert with a sub-query -EXPLAIN +PREHOOK: query: EXPLAIN FROM (select key, val from T2 where key = 8) x INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- multi-table insert with a sub-query -EXPLAIN +POSTHOOK: query: EXPLAIN FROM (select key, val from T2 where key = 8) x INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val diff --git a/ql/src/test/results/clientpositive/groupby_sort_2.q.out b/ql/src/test/results/clientpositive/groupby_sort_2.q.out index b5e52f1..de6bf14 100644 
--- a/ql/src/test/results/clientpositive/groupby_sort_2.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_2.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -20,13 +16,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ -40,15 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(val string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should not be converted to a map-side group by even though the group by key --- matches the sorted key. 
-EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT val, count(1) FROM T1 GROUP BY val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should not be converted to a map-side group by even though the group by key --- matches the sorted key. -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT val, count(1) FROM T1 GROUP BY val POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby_sort_3.q.out b/ql/src/test/results/clientpositive/groupby_sort_3.q.out index c16911a..da1db8c 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_3.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_3.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -20,13 +16,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ 
-40,13 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key string, val string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, val, count(1) FROM T1 GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, val, count(1) FROM T1 GROUP BY key, val POSTHOOK: type: QUERY @@ -177,13 +169,11 @@ POSTHOOK: query: CREATE TABLE outputTbl2(key string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- The plan should be converted to a map-side group by -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl2 SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl2 SELECT key, count(1) FROM T1 GROUP BY key POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby_sort_4.q.out b/ql/src/test/results/clientpositive/groupby_sort_4.q.out index a6b1c3d..ae2ae66 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_4.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_4.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key, val) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key, val) SORTED BY (key) INTO 
2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -20,13 +16,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ -40,15 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should not be converted to a map-side group by. --- However, there should no hash-based aggregation on the map-side -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should not be converted to a map-side group by. --- However, there should no hash-based aggregation on the map-side -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key POSTHOOK: type: QUERY @@ -147,15 +137,11 @@ POSTHOOK: query: CREATE TABLE outputTbl2(key STRING, val STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- The plan should not be converted to a map-side group by. 
--- Hash-based aggregations should be performed on the map-side -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should not be converted to a map-side group by. --- Hash-based aggregations should be performed on the map-side -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby_sort_5.q.out b/ql/src/test/results/clientpositive/groupby_sort_5.q.out index 369e2b5..40b9769 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_5.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_5.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -20,13 +16,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val 
from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ -40,17 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by, since the --- sorting columns and grouping columns match, and all the bucketing columns --- are part of sorting columns -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, val, count(1) FROM T1 GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by, since the --- sorting columns and grouping columns match, and all the bucketing columns --- are part of sorting columns -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, val, count(1) FROM T1 GROUP BY key, val POSTHOOK: type: QUERY @@ -199,29 +187,21 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 POSTHOOK: Lineage: t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t1.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- The plan should be converted to a map-side group by, since the --- sorting columns and grouping columns match, and all the 
bucketing columns --- are part of sorting columns -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, val, count(1) FROM T1 GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by, since the --- sorting columns and grouping columns match, and all the bucketing columns --- are part of sorting columns -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, val, count(1) FROM T1 GROUP BY key, val POSTHOOK: type: QUERY @@ -370,13 +350,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ -390,19 +368,11 @@ POSTHOOK: query: CREATE TABLE outputTbl2(key STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- The plan should not be converted to a map-side group by, since although the --- sorting columns and grouping columns match, all the bucketing columns --- are not part of sorting columns. However, no hash map aggregation is required --- on the mapside. 
-EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl2 SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should not be converted to a map-side group by, since although the --- sorting columns and grouping columns match, all the bucketing columns --- are not part of sorting columns. However, no hash map aggregation is required --- on the mapside. -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl2 SELECT key, count(1) FROM T1 GROUP BY key POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby_sort_6.q.out b/ql/src/test/results/clientpositive/groupby_sort_6.q.out index 9804cb0..843fb3b 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_6.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_6.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 @@ -18,13 +14,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should not be converted to a map-side group since no partition is being accessed -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should not be converted to a map-side group since no partition is being accessed -EXPLAIN EXTENDED +POSTHOOK: 
query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key POSTHOOK: type: QUERY @@ -168,13 +162,11 @@ POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 POSTHOOK: Output: default@t1@ds=2 -PREHOOK: query: -- The plan should not be converted to a map-side group since no partition is being accessed -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should not be converted to a map-side group since no partition is being accessed -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 where ds = '1' GROUP BY key POSTHOOK: type: QUERY @@ -309,15 +301,11 @@ POSTHOOK: query: SELECT * FROM outputTbl1 POSTHOOK: type: QUERY POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### -PREHOOK: query: -- The plan should not be converted to a map-side group since the partition being accessed --- is neither bucketed not sorted -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should not be converted to a map-side group since the partition being accessed --- is neither bucketed not sorted -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 where ds = '2' GROUP BY key POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby_sort_7.q.out b/ql/src/test/results/clientpositive/groupby_sort_7.q.out index 7264695..21b0a37 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_7.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_7.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY 
(ds string) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) PARTITIONED BY (ds string) CLUSTERED BY (val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -21,14 +17,12 @@ POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 POSTHOOK: Output: default@t1@ds=1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t1@ds=1 PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t1@ds=1 @@ -43,17 +37,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key STRING, val STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by, since the --- sorting columns and grouping columns match, and all the bucketing columns --- are part of sorting columns -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: 
-- The plan should be converted to a map-side group by, since the --- sorting columns and grouping columns match, and all the bucketing columns --- are part of sorting columns -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, val, count(1) FROM T1 where ds = '1' GROUP BY key, val POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/groupby_sort_8.q.out b/ql/src/test/results/clientpositive/groupby_sort_8.q.out index 4949cd4..4faa075 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_8.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_8.q.out @@ -17,30 +17,22 @@ POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 POSTHOOK: Output: default@t1@ds=1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t1@ds=1 PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t1@ds=1 POSTHOOK: Output: default@t1@ds=1 POSTHOOK: Lineage: t1 PARTITION(ds=1).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t1 PARTITION(ds=1).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- The plan is not converted to a map-side, since although the sorting columns and grouping --- columns match, the user is issueing a distinct. 
--- However, after HIVE-4310, partial aggregation is performed on the mapper -EXPLAIN +PREHOOK: query: EXPLAIN select count(distinct key) from T1 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is not converted to a map-side, since although the sorting columns and grouping --- columns match, the user is issueing a distinct. --- However, after HIVE-4310, partial aggregation is performed on the mapper -EXPLAIN +POSTHOOK: query: EXPLAIN select count(distinct key) from T1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/groupby_sort_9.q.out b/ql/src/test/results/clientpositive/groupby_sort_9.q.out index 96ff510..aa9e32e 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_9.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_9.q.out @@ -17,14 +17,12 @@ POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 POSTHOOK: Output: default@t1@ds=1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +PREHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t1@ds=1 PREHOOK: Output: default@t1@ds=1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' +POSTHOOK: query: INSERT OVERWRITE TABLE T1 PARTITION (ds='1') select key, val from T1 where ds = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t1@ds=1 @@ -43,14 +41,10 @@ POSTHOOK: Input: default@t1@ds=1 POSTHOOK: Output: default@t1@ds=2 POSTHOOK: Lineage: t1 PARTITION(ds=2).key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t1 PARTITION(ds=2).val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- The plan is not converted 
to a map-side, since although the sorting columns and grouping --- columns match, the user is querying multiple input partitions -EXPLAIN +PREHOOK: query: EXPLAIN select key, count(1) from T1 group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan is not converted to a map-side, since although the sorting columns and grouping --- columns match, the user is querying multiple input partitions -EXPLAIN +POSTHOOK: query: EXPLAIN select key, count(1) from T1 group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out index ce71354..8d9942b 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -22,13 +16,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make 
sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ -42,15 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key POSTHOOK: type: QUERY @@ -418,13 +406,11 @@ POSTHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val POSTHOOK: type: QUERY @@ -681,13 +667,11 @@ POSTHOOK: Input: default@outputtbl2 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for 
sub-queries -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key POSTHOOK: type: QUERY @@ -1047,13 +1031,11 @@ POSTHOOK: Input: default@outputtbl1 3 1 7 1 8 2 -PREHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k POSTHOOK: type: QUERY @@ -1421,15 +1403,11 @@ POSTHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl3 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key POSTHOOK: type: QUERY @@ -1798,13 +1776,11 @@ POSTHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt in POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl4 -PREHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT 
key, 1, val, count(1) FROM T1 GROUP BY key, 1, val PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val POSTHOOK: type: QUERY @@ -2062,13 +2038,11 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 POSTHOOK: type: QUERY @@ -2324,21 +2298,13 @@ POSTHOOK: Input: default@outputtbl3 3 4 1 7 8 1 8 9 2 -PREHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT cast(key + key as string), sum(cnt) from (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 group by key + key PREHOOK: type: QUERY -POSTHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT cast(key + key as string), sum(cnt) from (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -2609,8 +2575,7 @@ POSTHOOK: Input: default@outputtbl1 2 1 4 1 6 1 -PREHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -2618,8 +2583,7 @@ SELECT 
key, count(1) FROM T1 GROUP BY key SELECT key, count(1) FROM T1 GROUP BY key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -3049,8 +3013,7 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -3058,8 +3021,7 @@ SELECT key, count(1) FROM T1 GROUP BY key SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -3674,8 +3636,7 @@ POSTHOOK: Input: default@outputtbl1 6 1 7 1 8 2 -PREHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT subq1.key, subq1.cnt+subq2.cnt FROM (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -3683,8 +3644,7 @@ JOIN (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT subq1.key, subq1.cnt+subq2.cnt FROM (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -3918,16 +3878,14 @@ POSTHOOK: Input: default@outputtbl1 3 2 7 2 8 4 -PREHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM (SELECT key, count(1) FROM T1 GROUP BY key) subq1 JOIN 
(SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM (SELECT key, count(1) FROM T1 GROUP BY key) subq1 JOIN @@ -4279,25 +4237,21 @@ CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T2 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t2 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t2 POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T2 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T2 GROUP BY key POSTHOOK: type: QUERY @@ -4553,15 +4507,11 @@ POSTHOOK: Input: default@outputtbl1 3 1 7 1 8 2 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED 
INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val POSTHOOK: type: QUERY @@ -4932,15 +4882,11 @@ POSTHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 i POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl5 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 POSTHOOK: type: QUERY @@ -5306,15 +5252,13 @@ POSTHOOK: Input: default@outputtbl5 7 1 17 2 1 8 1 18 2 1 8 1 28 2 1 -PREHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T2)subq group by key, constant, val PREHOOK: type: QUERY -POSTHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T2)subq @@ -5683,8 +5627,7 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 
1 -PREHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( @@ -5693,8 +5636,7 @@ SELECT key, constant as constant2, val, 2 as constant3 from )subq2 group by key, constant3, val PREHOOK: type: QUERY -POSTHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( @@ -6268,14 +6210,12 @@ POSTHOOK: Input: default@dest2 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- multi-table insert with a sub-query -EXPLAIN +PREHOOK: query: EXPLAIN FROM (select key, val from T2 where key = 8) x INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- multi-table insert with a sub-query -EXPLAIN +POSTHOOK: query: EXPLAIN FROM (select key, val from T2 where key = 8) x INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val diff --git a/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out b/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out index dfe0ff1..5b94c0e 100644 --- a/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out +++ b/ql/src/test/results/clientpositive/groupby_sort_test_1.q.out @@ -16,13 +16,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an 
insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ -36,15 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key. However, in test mode, the group by wont be converted. -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key. However, in test mode, the group by wont be converted. -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/having.q.out b/ql/src/test/results/clientpositive/having.q.out index b5ab660..8a63075 100644 --- a/ql/src/test/results/clientpositive/having.q.out +++ b/ql/src/test/results/clientpositive/having.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +PREHOOK: query: EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS -EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +POSTHOOK: query: EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/index_auto.q.out b/ql/src/test/results/clientpositive/index_auto.q.out index ddb1b42..76ed49e 100644 --- a/ql/src/test/results/clientpositive/index_auto.q.out +++ 
b/ql/src/test/results/clientpositive/index_auto.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- try the query without indexing, with manual indexing, and with automatic indexing --- SORT_QUERY_RESULTS - --- without indexing -SELECT key, value FROM src WHERE key > 80 AND key < 100 +PREHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- try the query without indexing, with manual indexing, and with automatic indexing --- SORT_QUERY_RESULTS - --- without indexing -SELECT key, value FROM src WHERE key > 80 AND key < 100 +POSTHOOK: query: SELECT key, value FROM src WHERE key > 80 AND key < 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -51,13 +43,10 @@ POSTHOOK: Output: default@default__src_src_index__ POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- manual indexing #### A masked pattern was here #### PREHOOK: type: QUERY PREHOOK: Input: default@default__src_src_index__ #### A masked pattern was here #### -POSTHOOK: query: -- manual indexing -#### A masked pattern was here #### POSTHOOK: type: QUERY POSTHOOK: Input: default@default__src_src_index__ #### A masked pattern was here #### @@ -124,11 +113,9 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- automatic indexing -EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 +PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- automatic indexing -EXPLAIN SELECT key, value FROM src 
WHERE key > 80 AND key < 100 +POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage diff --git a/ql/src/test/results/clientpositive/index_auto_empty.q.out b/ql/src/test/results/clientpositive/index_auto_empty.q.out index bd870c9..0191339 100644 --- a/ql/src/test/results/clientpositive/index_auto_empty.q.out +++ b/ql/src/test/results/clientpositive/index_auto_empty.q.out @@ -1,29 +1,21 @@ -PREHOOK: query: -- Test to ensure that an empty index result is propagated correctly - -CREATE DATABASE it +PREHOOK: query: CREATE DATABASE it PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:it -POSTHOOK: query: -- Test to ensure that an empty index result is propagated correctly - -CREATE DATABASE it +POSTHOOK: query: CREATE DATABASE it POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:it -PREHOOK: query: -- Create temp, and populate it with some values in src. -CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:it PREHOOK: Output: it@temp -POSTHOOK: query: -- Create temp, and populate it with some values in src. -CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE it.temp(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:it POSTHOOK: Output: it@temp -PREHOOK: query: -- Build an index on it.temp. -CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: it@temp -POSTHOOK: query: -- Build an index on it.temp. 
-CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX temp_index ON TABLE it.temp(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: it@temp POSTHOOK: Output: it@it__temp_temp_index__ @@ -38,13 +30,11 @@ POSTHOOK: Output: it@it__temp_temp_index__ POSTHOOK: Lineage: it__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: it__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: it__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: -- query should not return any values -SELECT * FROM it.it__temp_temp_index__ WHERE key = 86 +PREHOOK: query: SELECT * FROM it.it__temp_temp_index__ WHERE key = 86 PREHOOK: type: QUERY PREHOOK: Input: it@it__temp_temp_index__ #### A masked pattern was here #### -POSTHOOK: query: -- query should not return any values -SELECT * FROM it.it__temp_temp_index__ WHERE key = 86 +POSTHOOK: query: SELECT * FROM it.it__temp_temp_index__ WHERE key = 86 POSTHOOK: type: QUERY POSTHOOK: Input: it@it__temp_temp_index__ #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/index_auto_file_format.q.out b/ql/src/test/results/clientpositive/index_auto_file_format.q.out index dee5fc1..21c8085 100644 --- a/ql/src/test/results/clientpositive/index_auto_file_format.q.out +++ b/ql/src/test/results/clientpositive/index_auto_file_format.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- test automatic use of index on different file formats -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@src -POSTHOOK: query: -- SORT_QUERY_RESULTS --- test 
automatic use of index on different file formats -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@src POSTHOOK: Output: default@default__src_src_index__ diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out index 98d9da0..3444efc 100644 --- a/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out +++ b/ql/src/test/results/clientpositive/index_auto_mult_tables.q.out @@ -1,14 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - --- without indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - --- without indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out b/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out index 8edb37e..86cf47a 100644 --- a/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out +++ b/ql/src/test/results/clientpositive/index_auto_mult_tables_compact.q.out @@ -1,14 +1,6 @@ 
-PREHOOK: query: -- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - --- without indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - --- without indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -197,11 +189,9 @@ POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04 POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12)._bucketname SIMPLE [(srcpart)srcpart.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12)._offsets EXPRESSION [(srcpart)srcpart.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__srcpart_srcpart_index_compact__ PARTITION(ds=2008-04-09,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- automatic indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +PREHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 PREHOOK: type: QUERY 
-POSTHOOK: query: -- automatic indexing -EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +POSTHOOK: query: EXPLAIN SELECT a.key, a.value FROM src a JOIN srcpart b ON (a.key = b.key) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage diff --git a/ql/src/test/results/clientpositive/index_auto_multiple.q.out b/ql/src/test/results/clientpositive/index_auto_multiple.q.out index 21e60c1..dfc2f34 100644 --- a/ql/src/test/results/clientpositive/index_auto_multiple.q.out +++ b/ql/src/test/results/clientpositive/index_auto_multiple.q.out @@ -1,13 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- With multiple indexes, make sure we choose which to use in a consistent order - -CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@src -POSTHOOK: query: -- SORT_QUERY_RESULTS --- With multiple indexes, make sure we choose which to use in a consistent order - -CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX src_key_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@src POSTHOOK: Output: default@default__src_src_key_index__ diff --git a/ql/src/test/results/clientpositive/index_auto_partitioned.q.out b/ql/src/test/results/clientpositive/index_auto_partitioned.q.out index f556369..8c2d6e4 100644 --- a/ql/src/test/results/clientpositive/index_auto_partitioned.q.out +++ b/ql/src/test/results/clientpositive/index_auto_partitioned.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- test automatic use of index on table with partitions -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED 
REBUILD +PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS --- test automatic use of index on table with partitions -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@srcpart POSTHOOK: Output: default@default__srcpart_src_part_index__ diff --git a/ql/src/test/results/clientpositive/index_auto_self_join.q.out b/ql/src/test/results/clientpositive/index_auto_self_join.q.out index bbc037a..43af419 100644 --- a/ql/src/test/results/clientpositive/index_auto_self_join.q.out +++ b/ql/src/test/results/clientpositive/index_auto_self_join.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - -EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - -EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/index_auto_unused.q.out b/ql/src/test/results/clientpositive/index_auto_unused.q.out index e476050..8a270d6 100644 --- 
a/ql/src/test/results/clientpositive/index_auto_unused.q.out +++ b/ql/src/test/results/clientpositive/index_auto_unused.q.out @@ -1,13 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- test cases where the index should not be used automatically - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@src -POSTHOOK: query: -- SORT_QUERY_RESULTS --- test cases where the index should not be used automatically - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@src POSTHOOK: Output: default@default__src_src_index__ @@ -22,11 +16,9 @@ POSTHOOK: Output: default@default__src_src_index__ POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- min size too large (src is less than 5G) -EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 +PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- min size too large (src is less than 5G) -EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 +POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -88,11 +80,9 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- max size too small -EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 +PREHOOK: query: EXPLAIN 
SELECT * FROM src WHERE key > 80 AND key < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- max size too small -EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 +POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key > 80 AND key < 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -154,11 +144,9 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- OR predicate not supported by compact indexes -EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480 +PREHOOK: query: EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480 PREHOOK: type: QUERY -POSTHOOK: query: -- OR predicate not supported by compact indexes -EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480 +POSTHOOK: query: EXPLAIN SELECT * FROM src WHERE key < 10 OR key > 480 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -233,12 +221,10 @@ POSTHOOK: Input: default@src 5 val_5 8 val_8 9 val_9 -PREHOOK: query: -- columns are not covered by indexes -DROP INDEX src_index on src +PREHOOK: query: DROP INDEX src_index on src PREHOOK: type: DROPINDEX PREHOOK: Input: default@src -POSTHOOK: query: -- columns are not covered by indexes -DROP INDEX src_index on src +POSTHOOK: query: DROP INDEX src_index on src POSTHOOK: type: DROPINDEX POSTHOOK: Input: default@src PREHOOK: query: CREATE INDEX src_val_index ON TABLE src(value) as 'COMPACT' WITH DEFERRED REBUILD @@ -329,12 +315,10 @@ PREHOOK: Input: default@src POSTHOOK: query: DROP INDEX src_val_index on src POSTHOOK: type: DROPINDEX POSTHOOK: Input: default@src -PREHOOK: query: -- required partitions have not been built yet -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@srcpart -POSTHOOK: query: -- required partitions have not been built yet -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH 
DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@srcpart POSTHOOK: Output: default@default__srcpart_src_part_index__ diff --git a/ql/src/test/results/clientpositive/index_auto_update.q.out b/ql/src/test/results/clientpositive/index_auto_update.q.out index 73bd4c7..d3d2ca3 100644 --- a/ql/src/test/results/clientpositive/index_auto_update.q.out +++ b/ql/src/test/results/clientpositive/index_auto_update.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Test if index is actually being used. - --- Create temp, and populate it with some values in src. -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@temp -POSTHOOK: query: -- Test if index is actually being used. - --- Create temp, and populate it with some values in src. -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@temp @@ -22,12 +16,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@temp POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@temp -POSTHOOK: query: -- Build an index on temp. 
-CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@temp POSTHOOK: Output: default@default__temp_temp_index__ @@ -42,11 +34,9 @@ POSTHOOK: Output: default@default__temp_temp_index__ POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: -- overwrite temp table so index is out of date -EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- overwrite temp table so index is out of date -EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE temp SELECT * FROM src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-6 is a root stage @@ -199,11 +189,9 @@ POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.Fi POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- query should return indexed values -EXPLAIN SELECT * FROM temp WHERE key = 86 +PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 PREHOOK: type: QUERY -POSTHOOK: query: -- query should return indexed values -EXPLAIN SELECT * FROM temp WHERE key = 86 +POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 
86 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage diff --git a/ql/src/test/results/clientpositive/index_bitmap.q.out b/ql/src/test/results/clientpositive/index_bitmap.q.out index 3cc8d29..5017027 100644 --- a/ql/src/test/results/clientpositive/index_bitmap.q.out +++ b/ql/src/test/results/clientpositive/index_bitmap.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP INDEX srcpart_index_proj on srcpart +PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart PREHOOK: type: DROPINDEX PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP INDEX srcpart_index_proj on srcpart +POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart POSTHOOK: type: DROPINDEX POSTHOOK: Input: default@srcpart PREHOOK: query: EXPLAIN diff --git a/ql/src/test/results/clientpositive/index_bitmap1.q.out b/ql/src/test/results/clientpositive/index_bitmap1.q.out index b750407..8f3af66 100644 --- a/ql/src/test/results/clientpositive/index_bitmap1.q.out +++ b/ql/src/test/results/clientpositive/index_bitmap1.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/index_bitmap2.q.out b/ql/src/test/results/clientpositive/index_bitmap2.q.out index 73c5b90..716e3c8 100644 --- a/ql/src/test/results/clientpositive/index_bitmap2.q.out +++ b/ql/src/test/results/clientpositive/index_bitmap2.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: 
EXPLAIN CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out b/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out index 0864099..773e9f1 100644 --- a/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out +++ b/ql/src/test/results/clientpositive/index_bitmap_auto_partitioned.q.out @@ -1,13 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- test automatic use of index on table with partitions -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- test automatic use of index on table with partitions -CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX src_part_index ON TABLE srcpart(key) as 'BITMAP' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@srcpart POSTHOOK: Output: default@default__srcpart_src_part_index__ diff --git a/ql/src/test/results/clientpositive/index_bitmap_compression.q.out b/ql/src/test/results/clientpositive/index_bitmap_compression.q.out index 662cbcf..b5b9a99 100644 --- a/ql/src/test/results/clientpositive/index_bitmap_compression.q.out +++ b/ql/src/test/results/clientpositive/index_bitmap_compression.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@src -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE INDEX src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX 
src_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@src POSTHOOK: Output: default@default__src_src_index__ @@ -21,11 +17,9 @@ POSTHOOK: Lineage: default__src_src_index__._bitmaps EXPRESSION [(src)src.FieldS POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__src_src_index__._offset SIMPLE [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- automatic indexing -EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 +PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- automatic indexing -EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 +POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage diff --git a/ql/src/test/results/clientpositive/index_bitmap_rc.q.out b/ql/src/test/results/clientpositive/index_bitmap_rc.q.out index 4dfe626..046442c 100644 --- a/ql/src/test/results/clientpositive/index_bitmap_rc.q.out +++ b/ql/src/test/results/clientpositive/index_bitmap_rc.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE +PREHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_rc -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE srcpart_rc (key int, value string) 
PARTITIONED BY (ds string, hr int) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_rc diff --git a/ql/src/test/results/clientpositive/index_compact.q.out b/ql/src/test/results/clientpositive/index_compact.q.out index 757da7f..97d7bac 100644 --- a/ql/src/test/results/clientpositive/index_compact.q.out +++ b/ql/src/test/results/clientpositive/index_compact.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP INDEX srcpart_index_proj on srcpart +PREHOOK: query: DROP INDEX srcpart_index_proj on srcpart PREHOOK: type: DROPINDEX PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP INDEX srcpart_index_proj on srcpart +POSTHOOK: query: DROP INDEX srcpart_index_proj on srcpart POSTHOOK: type: DROPINDEX POSTHOOK: Input: default@srcpart PREHOOK: query: EXPLAIN diff --git a/ql/src/test/results/clientpositive/index_compact_1.q.out b/ql/src/test/results/clientpositive/index_compact_1.q.out index ba312bb..7be9ada 100644 --- a/ql/src/test/results/clientpositive/index_compact_1.q.out +++ b/ql/src/test/results/clientpositive/index_compact_1.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/index_compact_2.q.out b/ql/src/test/results/clientpositive/index_compact_2.q.out index 3ffd7ce..28ba095 100644 --- a/ql/src/test/results/clientpositive/index_compact_2.q.out +++ b/ql/src/test/results/clientpositive/index_compact_2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE 
+PREHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_rc -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE srcpart_rc (key int, value string) PARTITIONED BY (ds string, hr int) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_rc diff --git a/ql/src/test/results/clientpositive/index_compact_3.q.out b/ql/src/test/results/clientpositive/index_compact_3.q.out index 931a4c4..14a5927 100644 --- a/ql/src/test/results/clientpositive/index_compact_3.q.out +++ b/ql/src/test/results/clientpositive/index_compact_3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE +PREHOOK: query: CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_index_test_rc -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE src_index_test_rc (key int, value string) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_index_test_rc diff --git a/ql/src/test/results/clientpositive/index_compression.q.out b/ql/src/test/results/clientpositive/index_compression.q.out index 83a84fa..41253a8 100644 --- a/ql/src/test/results/clientpositive/index_compression.q.out +++ b/ql/src/test/results/clientpositive/index_compression.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH 
DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@src -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX src_index ON TABLE src(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@src POSTHOOK: Output: default@default__src_src_index__ @@ -20,11 +16,9 @@ POSTHOOK: Output: default@default__src_src_index__ POSTHOOK: Lineage: default__src_src_index__._bucketname SIMPLE [(src)src.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__src_src_index__._offsets EXPRESSION [(src)src.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__src_src_index__.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- automatic indexing -EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 +PREHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- automatic indexing -EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 +POSTHOOK: query: EXPLAIN SELECT key, value FROM src WHERE key > 80 AND key < 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage diff --git a/ql/src/test/results/clientpositive/index_in_db.q.out b/ql/src/test/results/clientpositive/index_in_db.q.out index 5fb8c5b..6d7b0c3 100644 --- a/ql/src/test/results/clientpositive/index_in_db.q.out +++ b/ql/src/test/results/clientpositive/index_in_db.q.out @@ -2,16 +2,10 @@ PREHOOK: query: drop database if exists index_test_db cascade PREHOOK: type: DROPDATABASE POSTHOOK: query: drop database if exists index_test_db cascade POSTHOOK: type: DROPDATABASE -PREHOOK: query: -- Test selecting selecting from a table that is backed by an index --- create table, index in a db, then set default db as current db, and try selecting - -create database index_test_db 
+PREHOOK: query: create database index_test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:index_test_db -POSTHOOK: query: -- Test selecting selecting from a table that is backed by an index --- create table, index in a db, then set default db as current db, and try selecting - -create database index_test_db +POSTHOOK: query: create database index_test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:index_test_db PREHOOK: query: use index_test_db diff --git a/ql/src/test/results/clientpositive/index_serde.q.out b/ql/src/test/results/clientpositive/index_serde.q.out index a407918..c1d7ea3 100644 --- a/ql/src/test/results/clientpositive/index_serde.q.out +++ b/ql/src/test/results/clientpositive/index_serde.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- Want to ensure we can build and use indices on tables stored with SerDes --- Build the (Avro backed) table -CREATE TABLE doctors +PREHOOK: query: CREATE TABLE doctors ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -32,10 +29,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors -POSTHOOK: query: -- SORT_QUERY_RESULTS --- Want to ensure we can build and use indices on tables stored with SerDes --- Build the (Avro backed) table -CREATE TABLE doctors +POSTHOOK: query: CREATE TABLE doctors ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -83,12 +77,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TAB POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@doctors -PREHOOK: query: -- Create and build an index -CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@doctors -POSTHOOK: query: -- 
Create and build an index -CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX doctors_index ON TABLE doctors(number) AS 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@doctors POSTHOOK: Output: default@default__doctors_doctors_index__ diff --git a/ql/src/test/results/clientpositive/index_skewtable.q.out b/ql/src/test/results/clientpositive/index_skewtable.q.out index 972789d..daecb54 100644 --- a/ql/src/test/results/clientpositive/index_skewtable.q.out +++ b/ql/src/test/results/clientpositive/index_skewtable.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Test creating an index on skewed table - --- Create a skew table -CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@kv -POSTHOOK: query: -- Test creating an index on skewed table - --- Create a skew table -CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE kv(key STRING, value STRING) SKEWED BY (key) ON ((3), (8)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@kv @@ -20,12 +14,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE kv POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@kv -PREHOOK: query: -- Create and build an index -CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@kv -POSTHOOK: query: -- Create and build an index -CREATE INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE 
INDEX kv_index ON TABLE kv(value) AS 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@kv POSTHOOK: Output: default@default__kv_kv_index__ @@ -69,11 +61,9 @@ POSTHOOK: Output: default@default__kv_kv_index__ POSTHOOK: Lineage: default__kv_kv_index__._bucketname SIMPLE [(kv)kv.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__kv_kv_index__._offsets EXPRESSION [(kv)kv.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__kv_kv_index__.value SIMPLE [(kv)kv.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- Run a query that uses the index -EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value +PREHOOK: query: EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value PREHOOK: type: QUERY -POSTHOOK: query: -- Run a query that uses the index -EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value +POSTHOOK: query: EXPLAIN SELECT * FROM kv WHERE value > '15' ORDER BY value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage diff --git a/ql/src/test/results/clientpositive/index_stale.q.out b/ql/src/test/results/clientpositive/index_stale.q.out index d4adbee..7883fcc 100644 --- a/ql/src/test/results/clientpositive/index_stale.q.out +++ b/ql/src/test/results/clientpositive/index_stale.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- test that stale indexes are not used - -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@temp -POSTHOOK: query: -- test that stale indexes are not used - -CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@temp @@ -20,12 +16,10 @@ POSTHOOK: 
Input: default@src POSTHOOK: Output: default@temp POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: temp.val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@temp -POSTHOOK: query: -- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@temp POSTHOOK: Output: default@default__temp_temp_index__ @@ -40,23 +34,19 @@ POSTHOOK: Output: default@default__temp_temp_index__ POSTHOOK: Lineage: default__temp_temp_index__._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__temp_temp_index__._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__temp_temp_index__.key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: -- overwrite temp table so index is out of date -INSERT OVERWRITE TABLE temp SELECT * FROM src +PREHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@temp -POSTHOOK: query: -- overwrite temp table so index is out of date -INSERT OVERWRITE TABLE temp SELECT * FROM src +POSTHOOK: query: INSERT OVERWRITE TABLE temp SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@temp POSTHOOK: Lineage: temp.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: temp.val SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- should return correct results bypassing index -EXPLAIN SELECT * FROM temp WHERE key = 86 +PREHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 PREHOOK: type: QUERY -POSTHOOK: query: -- should return correct results bypassing index -EXPLAIN SELECT * FROM temp WHERE key = 86 +POSTHOOK: query: EXPLAIN SELECT * FROM temp WHERE key = 86 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/index_stale_partitioned.q.out b/ql/src/test/results/clientpositive/index_stale_partitioned.q.out index f2aa0e4..2138c33 100644 --- a/ql/src/test/results/clientpositive/index_stale_partitioned.q.out +++ b/ql/src/test/results/clientpositive/index_stale_partitioned.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Test if index is actually being used. - --- Create temp, and populate it with some values in src. -CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@temp -POSTHOOK: query: -- Test if index is actually being used. - --- Create temp, and populate it with some values in src. 
-CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE temp(key STRING, val STRING) PARTITIONED BY (foo string) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@temp @@ -29,12 +23,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@temp@foo=bar POSTHOOK: Lineage: temp PARTITION(foo=bar).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: temp PARTITION(foo=bar).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD +PREHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX PREHOOK: Input: default@temp -POSTHOOK: query: -- Build an index on temp. -CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD +POSTHOOK: query: CREATE INDEX temp_index ON TABLE temp(key) as 'COMPACT' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX POSTHOOK: Input: default@temp POSTHOOK: Output: default@default__temp_temp_index__ @@ -51,26 +43,22 @@ POSTHOOK: Output: default@default__temp_temp_index__@foo=bar POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar)._bucketname SIMPLE [(temp)temp.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar)._offsets EXPRESSION [(temp)temp.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__temp_temp_index__ PARTITION(foo=bar).key SIMPLE [(temp)temp.FieldSchema(name:key, type:string, comment:null), ] -PREHOOK: query: -- overwrite temp table so index is out of date -INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src +PREHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src 
PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@temp@foo=bar -POSTHOOK: query: -- overwrite temp table so index is out of date -INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src +POSTHOOK: query: INSERT OVERWRITE TABLE temp PARTITION (foo = 'bar') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@temp@foo=bar POSTHOOK: Lineage: temp PARTITION(foo=bar).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: temp PARTITION(foo=bar).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- query should not return any values -SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar' +PREHOOK: query: SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar' PREHOOK: type: QUERY PREHOOK: Input: default@default__temp_temp_index__ PREHOOK: Input: default@default__temp_temp_index__@foo=bar #### A masked pattern was here #### -POSTHOOK: query: -- query should not return any values -SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar' +POSTHOOK: query: SELECT * FROM default__temp_temp_index__ WHERE key = 86 AND foo='bar' POSTHOOK: type: QUERY POSTHOOK: Input: default@default__temp_temp_index__ POSTHOOK: Input: default@default__temp_temp_index__@foo=bar diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out index 53903ad..7c9cb74 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort.q.out @@ -1,25 +1,17 @@ -PREHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata - -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) 
PARTITIONED BY (part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata - -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table -PREHOOK: query: -- Test group by, should be bucketed and sorted by group by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by, should be bucketed and sorted by group by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -65,14 +57,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by where a key isn't selected, should not be bucketed or sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by where a key isn't selected, should not be bucketed or sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key, 
value POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -118,14 +108,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join, should be bucketed and sorted by join key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join, should be bucketed and sorted by join key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -171,14 +159,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -224,14 +210,12 @@ Bucket Columns: [key, value] Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join with two keys and only one selected, should not be bucketed or sorted -INSERT OVERWRITE 
TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, '1' FROM src a JOIN src b ON a.key = b.key AND a.value = b.value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join with two keys and only one selected, should not be bucketed or sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, '1' FROM src a JOIN src b ON a.key = b.key AND a.value = b.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -277,14 +261,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join on three tables on same key, should be bucketed and sorted by join key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join on three tables on same key, should be bucketed and sorted by join key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key) POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -330,14 +312,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON 
(b.value = c.value) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -383,14 +363,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test distribute by, should only be bucketed by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src DISTRIBUTE BY key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test distribute by, should only be bucketed by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src DISTRIBUTE BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -436,14 +414,12 @@ Bucket Columns: [key] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test sort by, should be sorted by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src SORT BY key ASC PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test sort by, should be sorted by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src SORT BY key ASC POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -489,14 
+465,12 @@ Bucket Columns: [] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test sort by desc, should be sorted by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src SORT BY key DESC PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test sort by desc, should be sorted by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src SORT BY key DESC POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -542,14 +516,12 @@ Bucket Columns: [] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test cluster by, should be bucketed and sorted by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src CLUSTER BY key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test cluster by, should be bucketed and sorted by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src CLUSTER BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -595,14 +567,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test distribute by and sort by different keys, should be bucketed by one key sorted by the other -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src DISTRIBUTE BY key SORT BY value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: 
Output: default@test_table@part=1 -POSTHOOK: query: -- Test distribute by and sort by different keys, should be bucketed by one key sorted by the other -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM src DISTRIBUTE BY key SORT BY value POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -648,14 +618,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join in simple subquery, should be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value from (SELECT a.key, b.value FROM src a JOIN src b ON (a.key = b.key)) subq PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join in simple subquery, should be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value from (SELECT a.key, b.value FROM src a JOIN src b ON (a.key = b.key)) subq POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -701,14 +669,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join in simple subquery renaming key column, should be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT k, value FROM (SELECT a.key as k, b.value FROM src a JOIN src b ON (a.key = b.key)) subq PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join in simple subquery renaming key column, should be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table 
PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT k, value FROM (SELECT a.key as k, b.value FROM src a JOIN src b ON (a.key = b.key)) subq POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -754,14 +720,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in simple subquery, should be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, cnt from (SELECT key, count(*) as cnt FROM src GROUP BY key) subq PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in simple subquery, should be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, cnt from (SELECT key, count(*) as cnt FROM src GROUP BY key) subq POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -807,14 +771,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in simple subquery renaming key column, should be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT k, cnt FROM (SELECT key as k, count(*) as cnt FROM src GROUP BY key) subq PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in simple subquery renaming key column, should be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT k, cnt FROM (SELECT key as k, count(*) as cnt FROM src GROUP BY key) subq POSTHOOK: 
type: QUERY POSTHOOK: Input: default@src @@ -860,14 +822,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery with where outside, should still be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery with where outside, should still be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -913,14 +873,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery with expression on value, should still be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value + 1 FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery with expression on value, should still be bucketed and sorted on key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value + 1 FROM (SELECT key, count(1) AS value FROM src group by key) a where key < 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -966,14 +924,12 @@ Bucket Columns: [key] Sort 
Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery with lateral view outside, should still be bucketed and sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT key FROM src group by key) a lateral view explode(array(1, 2)) value as value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery with lateral view outside, should still be bucketed and sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT key FROM src group by key) a lateral view explode(array(1, 2)) value as value POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -1019,16 +975,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the --- key of the outer group by -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the --- key of the outer group by -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -1074,16 +1026,12 @@ Bucket Columns: [value] 
Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery with select on outside reordering the columns, should be bucketed and --- sorted by the column the group by key ends up in -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT value, key FROM (SELECT key, count(1) as value FROM src group by key) a PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery with select on outside reordering the columns, should be bucketed and --- sorted by the column the group by key ends up in -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT value, key FROM (SELECT key, count(1) as value FROM src group by key) a POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -1129,14 +1077,12 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery followed by distribute by, should only be bucketed by the distribute key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a distribute by key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery followed by distribute by, should only be bucketed by the distribute key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a distribute by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -1182,14 +1128,12 @@ Bucket 
Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery followed by sort by, should only be sorted by the sort key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a sort by key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery followed by sort by, should only be sorted by the sort key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT key, count(1) as value FROM src group by key) a sort by key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -1235,14 +1179,12 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery followed by transform script, should not be bucketed or sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT TRANSFORM (a.key, a.value) USING 'cat' AS (key, value) FROM (SELECT key, count(1) AS value FROM src GROUP BY KEY) a PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery followed by transform script, should not be bucketed or sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT TRANSFORM (a.key, a.value) USING 'cat' AS (key, value) FROM (SELECT key, count(1) AS value FROM src GROUP BY KEY) a POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -1288,14 +1230,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: 
serialization.format 1 -PREHOOK: query: -- Test group by on function, should be bucketed and sorted by key and value because the function is applied in the mapper -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT concat(key, "a") AS key, value, count(*) FROM src GROUP BY concat(key, "a"), value) a PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by on function, should be bucketed and sorted by key and value because the function is applied in the mapper -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value FROM (SELECT concat(key, "a") AS key, value, count(*) FROM src GROUP BY concat(key, "a"), value) a POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out index bb9278c..52ebe5a 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort_convert_join.q.out @@ -1,27 +1,17 @@ -PREHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata. In particular, those cases --- where joins may be auto converted to map joins. - -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata. 
In particular, those cases --- where joins may be auto converted to map joins. - -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table -PREHOOK: query: -- Tests a join which is converted to a map join, the output should be neither bucketed nor sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Tests a join which is converted to a map join, the output should be neither bucketed nor sorted -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -67,20 +57,14 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- This test tests the scenario when the mapper dies. So, create a conditional task for the mapjoin. --- Tests a join which is not converted to a map join, the output should be bucketed and sorted. - -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 FAILED: Execution Error, return code 3 from org.apache.hadoop.hive.ql.exec.mr.MapredLocalTask ATTEMPT: Execute BackupTask: org.apache.hadoop.hive.ql.exec.mr.MapRedTask -POSTHOOK: query: -- This test tests the scenario when the mapper dies. 
So, create a conditional task for the mapjoin. --- Tests a join which is not converted to a map join, the output should be bucketed and sorted. - -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out index 84021a3..d740dea 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort_grouping_operators.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata, in particular, this tests --- the grouping operators rollup/cube/grouping sets - -CREATE TABLE test_table_out (key STRING, value STRING, agg STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table_out (key STRING, value STRING, agg STRING) PARTITIONED BY (part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table_out -POSTHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata, in particular, this tests --- the grouping operators rollup/cube/grouping sets - -CREATE TABLE test_table_out (key STRING, value STRING, agg STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE test_table_out (key STRING, value STRING, agg STRING) PARTITIONED BY (part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table_out @@ -22,12 +14,10 @@ POSTHOOK: query: CREATE TABLE test_table_out_2 (key STRING, value STRING, groupi 
POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table_out_2 -PREHOOK: query: -- Test rollup, should not be bucketed or sorted because its missing the grouping ID -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT key, value, count(1) FROM src GROUP BY key, value WITH ROLLUP PREHOOK: type: QUERY -POSTHOOK: query: -- Test rollup, should not be bucketed or sorted because its missing the grouping ID -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT key, value, count(1) FROM src GROUP BY key, value WITH ROLLUP POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -774,16 +764,12 @@ Bucket Columns: [key, value] Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test rollup, should be bucketed and sorted on key, value, grouping_key - -INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') SELECT key, value, GROUPING__ID, count(1) FROM src GROUP BY key, value WITH ROLLUP PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table_out_2@part=1 -POSTHOOK: query: -- Test rollup, should be bucketed and sorted on key, value, grouping_key - -INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') SELECT key, value, GROUPING__ID, count(1) FROM src GROUP BY key, value WITH ROLLUP POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -1460,12 +1446,10 @@ Bucket Columns: [key, value, grouping_key] Sort Columns: [Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: 
query: -- Test cube, should not be bucketed or sorted because its missing the grouping ID -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT key, value, count(1) FROM src GROUP BY key, value WITH CUBE PREHOOK: type: QUERY -POSTHOOK: query: -- Test cube, should not be bucketed or sorted because its missing the grouping ID -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT key, value, count(1) FROM src GROUP BY key, value WITH CUBE POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1585,16 +1569,12 @@ Bucket Columns: [key, value] Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test cube, should be bucketed and sorted on key, value, grouping_key - -INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') SELECT key, value, GROUPING__ID, count(1) FROM src GROUP BY key, value WITH CUBE PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table_out_2@part=1 -POSTHOOK: query: -- Test cube, should be bucketed and sorted on key, value, grouping_key - -INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') SELECT key, value, GROUPING__ID, count(1) FROM src GROUP BY key, value WITH CUBE POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -1644,12 +1624,10 @@ Bucket Columns: [key, value, grouping_key] Sort Columns: [Order(col:key, order:1), Order(col:value, order:1), Order(col:grouping_key, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test grouping sets, should not be bucketed or sorted because its missing the grouping ID -EXPLAIN INSERT OVERWRITE 
TABLE test_table_out PARTITION (part = '1') +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT key, value, count(1) FROM src GROUP BY key, value GROUPING SETS (key, value) PREHOOK: type: QUERY -POSTHOOK: query: -- Test grouping sets, should not be bucketed or sorted because its missing the grouping ID -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT key, value, count(1) FROM src GROUP BY key, value GROUPING SETS (key, value) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1769,16 +1747,12 @@ Bucket Columns: [key, value] Sort Columns: [Order(col:key, order:1), Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test grouping sets, should be bucketed and sorted on key, value, grouping_key - -INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') SELECT key, value, GROUPING__ID, count(1) FROM src GROUP BY key, value GROUPING SETS (key, value) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table_out_2@part=1 -POSTHOOK: query: -- Test grouping sets, should be bucketed and sorted on key, value, grouping_key - -INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table_out_2 PARTITION (part = '1') SELECT key, value, GROUPING__ID, count(1) FROM src GROUP BY key, value GROUPING SETS (key, value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out index f45a719..98a2f5f 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort_list_bucket.q.out @@ -1,37 +1,23 @@ -PREHOOK: 
query: -- This tests that bucketing/sorting metadata is not inferred for tables with list bucketing - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -CREATE TABLE list_bucketing_table (key STRING, value STRING) +PREHOOK: query: CREATE TABLE list_bucketing_table (key STRING, value STRING) PARTITIONED BY (part STRING) SKEWED BY (key) ON ("484") STORED AS DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_table -POSTHOOK: query: -- This tests that bucketing/sorting metadata is not inferred for tables with list bucketing - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -CREATE TABLE list_bucketing_table (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE list_bucketing_table (key STRING, value STRING) PARTITIONED BY (part STRING) SKEWED BY (key) ON ("484") STORED AS DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_table -PREHOOK: query: -- Tests group by, the output should neither be bucketed nor sorted - -INSERT OVERWRITE TABLE list_bucketing_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE list_bucketing_table PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@list_bucketing_table@part=1 -POSTHOOK: query: -- Tests group by, the output should neither be bucketed nor sorted - -INSERT OVERWRITE TABLE list_bucketing_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE list_bucketing_table PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -82,30 +68,26 @@ Skewed Values: [[484]] Skewed Value to Truncated Path: {[484]=/list_bucketing_table/part=1/key=484} Storage Desc Params: serialization.format 1 -PREHOOK: query: -- create a table skewed on a key which doesnt exist in the data -CREATE TABLE 
list_bucketing_table2 (key STRING, value STRING) +PREHOOK: query: CREATE TABLE list_bucketing_table2 (key STRING, value STRING) PARTITIONED BY (part STRING) SKEWED BY (key) ON ("abc") STORED AS DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_table2 -POSTHOOK: query: -- create a table skewed on a key which doesnt exist in the data -CREATE TABLE list_bucketing_table2 (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE list_bucketing_table2 (key STRING, value STRING) PARTITIONED BY (part STRING) SKEWED BY (key) ON ("abc") STORED AS DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_table2 -PREHOOK: query: -- should not be bucketed or sorted -INSERT OVERWRITE TABLE list_bucketing_table2 PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE list_bucketing_table2 PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@list_bucketing_table2@part=1 -POSTHOOK: query: -- should not be bucketed or sorted -INSERT OVERWRITE TABLE list_bucketing_table2 PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE list_bucketing_table2 PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out index d4c22f4..59b20fe 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort_multi_insert.q.out @@ -1,31 +1,19 @@ -PREHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata. In particular, those cases --- where multi insert is used. 
- -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata. In particular, those cases --- where multi insert is used. - -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table -PREHOOK: query: -- Simple case, neither partition should be bucketed or sorted - -FROM src +PREHOOK: query: FROM src INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value INSERT OVERWRITE TABLE test_table PARTITION (part = '2') SELECT value, key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 PREHOOK: Output: default@test_table@part=2 -POSTHOOK: query: -- Simple case, neither partition should be bucketed or sorted - -FROM src +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, value INSERT OVERWRITE TABLE test_table PARTITION (part = '2') SELECT value, key POSTHOOK: type: QUERY @@ -114,18 +102,14 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- The partitions should be bucketed and sorted by different keys - -FROM src +PREHOOK: query: FROM src INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, COUNT(*) GROUP BY key INSERT OVERWRITE TABLE test_table PARTITION (part = '2') SELECT COUNT(*), value GROUP BY value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 PREHOOK: Output: 
default@test_table@part=2 -POSTHOOK: query: -- The partitions should be bucketed and sorted by different keys - -FROM src +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, COUNT(*) GROUP BY key INSERT OVERWRITE TABLE test_table PARTITION (part = '2') SELECT COUNT(*), value GROUP BY value POSTHOOK: type: QUERY @@ -214,18 +198,14 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- The first partition should be bucketed and sorted, the second should not - -FROM src +PREHOOK: query: FROM src INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, COUNT(*) GROUP BY key INSERT OVERWRITE TABLE test_table PARTITION (part = '2') SELECT key, value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 PREHOOK: Output: default@test_table@part=2 -POSTHOOK: query: -- The first partition should be bucketed and sorted, the second should not - -FROM src +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, COUNT(*) GROUP BY key INSERT OVERWRITE TABLE test_table PARTITION (part = '2') SELECT key, value POSTHOOK: type: QUERY @@ -314,18 +294,14 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test the multi group by single reducer optimization --- Both partitions should be bucketed by key -FROM src +PREHOOK: query: FROM src INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, COUNT(*) GROUP BY key INSERT OVERWRITE TABLE test_table PARTITION (part = '2') SELECT key, SUM(SUBSTR(value, 5)) GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 PREHOOK: Output: default@test_table@part=2 -POSTHOOK: query: -- Test the multi group by single reducer optimization --- Both partitions should be bucketed by key -FROM src +POSTHOOK: query: FROM src INSERT 
OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, COUNT(*) GROUP BY key INSERT OVERWRITE TABLE test_table PARTITION (part = '2') SELECT key, SUM(SUBSTR(value, 5)) GROUP BY key POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/infer_const_type.q.out b/ql/src/test/results/clientpositive/infer_const_type.q.out index 90ca7e1..08f53ac 100644 --- a/ql/src/test/results/clientpositive/infer_const_type.q.out +++ b/ql/src/test/results/clientpositive/infer_const_type.q.out @@ -103,10 +103,7 @@ POSTHOOK: Input: default@infertypes #### A masked pattern was here #### 127 32767 12345 -12345 906.0 -307.0 1234 WARNING: Comparing a bigint and a string may result in a loss of precision. -PREHOOK: query: -- all should return false as all numbers exceeed the largest number --- which could be represented by the corresponding type --- and string_col = long_const should return false -EXPLAIN SELECT * FROM infertypes WHERE +PREHOOK: query: EXPLAIN SELECT * FROM infertypes WHERE ti = '128' OR si = 32768 OR i = '2147483648' OR @@ -114,10 +111,7 @@ EXPLAIN SELECT * FROM infertypes WHERE fl = 'float' OR db = 'double' PREHOOK: type: QUERY -POSTHOOK: query: -- all should return false as all numbers exceeed the largest number --- which could be represented by the corresponding type --- and string_col = long_const should return false -EXPLAIN SELECT * FROM infertypes WHERE +POSTHOOK: query: EXPLAIN SELECT * FROM infertypes WHERE ti = '128' OR si = 32768 OR i = '2147483648' OR @@ -178,14 +172,12 @@ POSTHOOK: query: SELECT * FROM infertypes WHERE POSTHOOK: type: QUERY POSTHOOK: Input: default@infertypes #### A masked pattern was here #### -PREHOOK: query: -- for the query like: int_col = double, should return false -EXPLAIN SELECT * FROM infertypes WHERE +PREHOOK: query: EXPLAIN SELECT * FROM infertypes WHERE ti = '127.0' OR si = 327.0 OR i = '-100.0' PREHOOK: type: QUERY -POSTHOOK: query: -- for the query like: int_col = double, should return false -EXPLAIN SELECT * 
FROM infertypes WHERE +POSTHOOK: query: EXPLAIN SELECT * FROM infertypes WHERE ti = '127.0' OR si = 327.0 OR i = '-100.0' diff --git a/ql/src/test/results/clientpositive/infer_join_preds.q.out b/ql/src/test/results/clientpositive/infer_join_preds.q.out index 39d29ba..07f9082 100644 --- a/ql/src/test/results/clientpositive/infer_join_preds.q.out +++ b/ql/src/test/results/clientpositive/infer_join_preds.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select * from src a join src1 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select * from src a join src1 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/init_file.q.out b/ql/src/test/results/clientpositive/init_file.q.out index 0f8cf0c..f2a0492 100644 --- a/ql/src/test/results/clientpositive/init_file.q.out +++ b/ql/src/test/results/clientpositive/init_file.q.out @@ -6,17 +6,11 @@ POSTHOOK: query: create table tbl_created_by_init(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl_created_by_init -PREHOOK: query: -- tbl_created_by_init is supposed to have been created for us --- automatically by test_init_file.sql - -select * from tbl_created_by_init +PREHOOK: query: select * from tbl_created_by_init PREHOOK: type: QUERY PREHOOK: Input: default@tbl_created_by_init #### A masked pattern was here #### -POSTHOOK: query: -- tbl_created_by_init is supposed to have been created for us --- automatically by test_init_file.sql - -select * from tbl_created_by_init +POSTHOOK: query: select * from tbl_created_by_init POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl_created_by_init #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/innerjoin.q.out b/ql/src/test/results/clientpositive/innerjoin.q.out index 872933e..4a7649d 100644 --- 
a/ql/src/test/results/clientpositive/innerjoin.q.out +++ b/ql/src/test/results/clientpositive/innerjoin.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 @@ -1141,15 +1137,11 @@ POSTHOOK: Input: default@dest_j1 98 val_98 98 val_98 98 val_98 -PREHOOK: query: -- verify that INNER is a non-reserved word for backwards compatibility --- change from HIVE-6617, inner is a SQL2011 reserved keyword. -create table `inner`(i int) +PREHOOK: query: create table `inner`(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inner -POSTHOOK: query: -- verify that INNER is a non-reserved word for backwards compatibility --- change from HIVE-6617, inner is a SQL2011 reserved keyword. 
-create table `inner`(i int) +POSTHOOK: query: create table `inner`(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inner diff --git a/ql/src/test/results/clientpositive/input11_limit.q.out b/ql/src/test/results/clientpositive/input11_limit.q.out index 92db5a9..597554e 100644 --- a/ql/src/test/results/clientpositive/input11_limit.q.out +++ b/ql/src/test/results/clientpositive/input11_limit.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/input12.q.out b/ql/src/test/results/clientpositive/input12.q.out index 1557c58..3bb765c 100644 --- a/ql/src/test/results/clientpositive/input12.q.out +++ b/ql/src/test/results/clientpositive/input12.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/input14.q.out b/ql/src/test/results/clientpositive/input14.q.out index 
f5c5b25..af04a98 100644 --- a/ql/src/test/results/clientpositive/input14.q.out +++ b/ql/src/test/results/clientpositive/input14.q.out @@ -105,15 +105,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +PREHOOK: query: SELECT dest1.* FROM dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1.* FROM dest1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/input16.q.out b/ql/src/test/results/clientpositive/input16.q.out index 550e78e..a9b454f 100644 --- a/ql/src/test/results/clientpositive/input16.q.out +++ b/ql/src/test/results/clientpositive/input16.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- TestSerDe is a user defined serde where the default delimiter is Ctrl-B -DROP TABLE INPUT16 +PREHOOK: query: DROP TABLE INPUT16 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- TestSerDe is a user defined serde where the default delimiter is Ctrl-B -DROP TABLE INPUT16 +POSTHOOK: query: DROP TABLE INPUT16 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE INPUT16(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/input17.q.out b/ql/src/test/results/clientpositive/input17.q.out index 1f7e787..057a92d 100644 --- a/ql/src/test/results/clientpositive/input17.q.out +++ b/ql/src/test/results/clientpositive/input17.q.out @@ -102,15 
+102,11 @@ POSTHOOK: Input: default@src_thrift POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.key SCRIPT [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lint, type:array, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array>, comment:from deserializer), ] POSTHOOK: Lineage: dest1.value SCRIPT [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lint, type:array, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array>, comment:from deserializer), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +PREHOOK: query: SELECT dest1.* FROM dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1.* FROM dest1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/input18.q.out b/ql/src/test/results/clientpositive/input18.q.out index 7993cb7..b341510 100644 --- a/ql/src/test/results/clientpositive/input18.q.out +++ b/ql/src/test/results/clientpositive/input18.q.out @@ -105,15 +105,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +PREHOOK: query: SELECT dest1.* FROM dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - 
-SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1.* FROM dest1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/input1_limit.q.out b/ql/src/test/results/clientpositive/input1_limit.q.out index 0ceb153..0ca1552 100644 --- a/ql/src/test/results/clientpositive/input1_limit.q.out +++ b/ql/src/test/results/clientpositive/input1_limit.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/input39.q.out b/ql/src/test/results/clientpositive/input39.q.out index f48f5f5..e5b42a7 100644 --- a/ql/src/test/results/clientpositive/input39.q.out +++ b/ql/src/test/results/clientpositive/input39.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - - -create table t1(key string, value string) partitioned by (ds string) +PREHOOK: query: create table t1(key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - - -create table t1(key string, value string) partitioned by (ds string) +POSTHOOK: query: create table t1(key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/input40.q.out 
b/ql/src/test/results/clientpositive/input40.q.out index bb0eabe..fef8db2 100644 --- a/ql/src/test/results/clientpositive/input40.q.out +++ b/ql/src/test/results/clientpositive/input40.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table tmp_insert_test (key string, value string) stored as textfile +PREHOOK: query: create table tmp_insert_test (key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_insert_test -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table tmp_insert_test (key string, value string) stored as textfile +POSTHOOK: query: create table tmp_insert_test (key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp_insert_test diff --git a/ql/src/test/results/clientpositive/input42.q.out b/ql/src/test/results/clientpositive/input42.q.out index 8e91af0..adf1523 100644 --- a/ql/src/test/results/clientpositive/input42.q.out +++ b/ql/src/test/results/clientpositive/input42.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain extended +PREHOOK: query: explain extended select * from srcpart a where a.ds='2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain extended +POSTHOOK: query: explain extended select * from srcpart a where a.ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/input_lazyserde.q.out b/ql/src/test/results/clientpositive/input_lazyserde.q.out index d5ad564..64dc6c1 100644 --- a/ql/src/test/results/clientpositive/input_lazyserde.q.out +++ b/ql/src/test/results/clientpositive/input_lazyserde.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE dest1 +PREHOOK: query: DROP TABLE dest1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE dest1 +POSTHOOK: query: DROP TABLE dest1 POSTHOOK: type: DROPTABLE 
PREHOOK: query: CREATE TABLE dest1(a array, b array, c map, d int, e string) ROW FORMAT DELIMITED diff --git a/ql/src/test/results/clientpositive/input_part10.q.out b/ql/src/test/results/clientpositive/input_part10.q.out index 8455bb3..1d738b4 100644 --- a/ql/src/test/results/clientpositive/input_part10.q.out +++ b/ql/src/test/results/clientpositive/input_part10.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - -CREATE TABLE part_special ( +PREHOOK: query: CREATE TABLE part_special ( a STRING, b STRING ) PARTITIONED BY ( @@ -11,10 +8,7 @@ CREATE TABLE part_special ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_special -POSTHOOK: query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - -CREATE TABLE part_special ( +POSTHOOK: query: CREATE TABLE part_special ( a STRING, b STRING ) PARTITIONED BY ( diff --git a/ql/src/test/results/clientpositive/input_part2.q.out b/ql/src/test/results/clientpositive/input_part2.q.out index 0c069a5..2500035 100644 --- a/ql/src/test/results/clientpositive/input_part2.q.out +++ b/ql/src/test/results/clientpositive/input_part2.q.out @@ -14,16 +14,12 @@ POSTHOOK: query: CREATE TABLE dest2(key INT, value STRING, hr STRING, ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM srcpart INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN 
EXTENDED FROM srcpart INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12' diff --git a/ql/src/test/results/clientpositive/input_part9.q.out b/ql/src/test/results/clientpositive/input_part9.q.out index f73d0e1..91da0c5 100644 --- a/ql/src/test/results/clientpositive/input_part9.q.out +++ b/ql/src/test/results/clientpositive/input_part9.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT x.* FROM SRCPART x WHERE key IS NOT NULL AND ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT x.* FROM SRCPART x WHERE key IS NOT NULL AND ds = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/inputddl4.q.out b/ql/src/test/results/clientpositive/inputddl4.q.out index d573d31..c942963 100644 --- a/ql/src/test/results/clientpositive/inputddl4.q.out +++ b/ql/src/test/results/clientpositive/inputddl4.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- a simple test to test sorted/clustered syntax - -CREATE TABLE INPUTDDL4(viewTime STRING, userid INT, +PREHOOK: query: CREATE TABLE INPUTDDL4(viewTime STRING, userid INT, page_url STRING, referrer_url STRING, friends ARRAY, properties MAP, ip STRING COMMENT 'IP Address of the User') @@ -10,9 +8,7 @@ CREATE TABLE INPUTDDL4(viewTime STRING, userid INT, PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@INPUTDDL4 -POSTHOOK: query: -- a simple test to test sorted/clustered syntax - -CREATE TABLE INPUTDDL4(viewTime STRING, userid INT, +POSTHOOK: query: CREATE TABLE INPUTDDL4(viewTime STRING, userid INT, page_url STRING, referrer_url STRING, 
friends ARRAY, properties MAP, ip STRING COMMENT 'IP Address of the User') diff --git a/ql/src/test/results/clientpositive/inputddl5.q.out b/ql/src/test/results/clientpositive/inputddl5.q.out index f3956f0..c39c05d 100644 --- a/ql/src/test/results/clientpositive/inputddl5.q.out +++ b/ql/src/test/results/clientpositive/inputddl5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- test for internationalization --- kv4.txt contains the utf-8 character 0xE982B5E993AE which we are verifying later on -CREATE TABLE INPUTDDL5(name STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE INPUTDDL5(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@INPUTDDL5 -POSTHOOK: query: -- test for internationalization --- kv4.txt contains the utf-8 character 0xE982B5E993AE which we are verifying later on -CREATE TABLE INPUTDDL5(name STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE INPUTDDL5(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@INPUTDDL5 diff --git a/ql/src/test/results/clientpositive/inputddl6.q.out b/ql/src/test/results/clientpositive/inputddl6.q.out index 5a040e6..d3a15a0 100644 --- a/ql/src/test/results/clientpositive/inputddl6.q.out +++ b/ql/src/test/results/clientpositive/inputddl6.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- test for describe extended table --- test for describe extended table partition --- test for alter table drop partition -CREATE TABLE INPUTDDL6(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE INPUTDDL6(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@INPUTDDL6 -POSTHOOK: query: -- test for describe extended table --- test for describe extended table partition --- test for alter table drop partition -CREATE TABLE INPUTDDL6(KEY STRING, VALUE STRING) 
PARTITIONED BY(ds STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE INPUTDDL6(KEY STRING, VALUE STRING) PARTITIONED BY(ds STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@INPUTDDL6 diff --git a/ql/src/test/results/clientpositive/inputddl7.q.out b/ql/src/test/results/clientpositive/inputddl7.q.out index 0d64baf..d255b39 100644 --- a/ql/src/test/results/clientpositive/inputddl7.q.out +++ b/ql/src/test/results/clientpositive/inputddl7.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- test for loading into tables with the correct file format --- test for loading into partitions with the correct file format - - -CREATE TABLE T1(name STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- test for loading into tables with the correct file format --- test for loading into partitions with the correct file format - - -CREATE TABLE T1(name STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/insert1.q.out b/ql/src/test/results/clientpositive/insert1.q.out index 7a2c429..3952578 100644 --- a/ql/src/test/results/clientpositive/insert1.q.out +++ b/ql/src/test/results/clientpositive/insert1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table insert1(key int, value string) stored as textfile +PREHOOK: query: create table insert1(key int, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@insert1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table insert1(key int, value string) stored as textfile +POSTHOOK: query: create table insert1(key int, value string) stored as textfile POSTHOOK: type: 
CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@insert1 @@ -206,12 +202,10 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: -- HIVE-3465 -create database x +PREHOOK: query: create database x PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:x -POSTHOOK: query: -- HIVE-3465 -create database x +POSTHOOK: query: create database x POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:x PREHOOK: query: create table x.insert1(key int, value string) stored as textfile @@ -569,12 +563,10 @@ STAGE PLANS: hdfs directory: true #### A masked pattern was here #### -PREHOOK: query: -- HIVE-3676 -CREATE DATABASE db2 +PREHOOK: query: CREATE DATABASE db2 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:db2 -POSTHOOK: query: -- HIVE-3676 -CREATE DATABASE db2 +POSTHOOK: query: CREATE DATABASE db2 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:db2 PREHOOK: query: USE db2 diff --git a/ql/src/test/results/clientpositive/insert2.q.out b/ql/src/test/results/clientpositive/insert2.q.out index 3805f3a..7b4dbb8 100644 --- a/ql/src/test/results/clientpositive/insert2.q.out +++ b/ql/src/test/results/clientpositive/insert2.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -USE default +PREHOOK: query: USE default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default -POSTHOOK: query: -- SORT_QUERY_RESULTS - -USE default +POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default PREHOOK: query: CREATE DATABASE db1 diff --git a/ql/src/test/results/clientpositive/insert_into1.q.out b/ql/src/test/results/clientpositive/insert_into1.q.out index 2fb7b71..da863a7 100644 --- a/ql/src/test/results/clientpositive/insert_into1.q.out +++ b/ql/src/test/results/clientpositive/insert_into1.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE insert_into1 +PREHOOK: query: DROP TABLE insert_into1 PREHOOK: type: DROPTABLE -POSTHOOK: query: 
-- SORT_QUERY_RESULTS - -DROP TABLE insert_into1 +POSTHOOK: query: DROP TABLE insert_into1 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE insert_into1 (key int, value string) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/insert_into4.q.out b/ql/src/test/results/clientpositive/insert_into4.q.out index 07ff99b..bb4e557 100644 --- a/ql/src/test/results/clientpositive/insert_into4.q.out +++ b/ql/src/test/results/clientpositive/insert_into4.q.out @@ -192,13 +192,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@insert_into4a #### A masked pattern was here #### -1653251832 -PREHOOK: query: --At this point insert_into4a has 2 files (if INSERT INTO merges isn't fixed) - -EXPLAIN INSERT INTO TABLE insert_into4b SELECT * FROM insert_into4a +PREHOOK: query: EXPLAIN INSERT INTO TABLE insert_into4b SELECT * FROM insert_into4a PREHOOK: type: QUERY -POSTHOOK: query: --At this point insert_into4a has 2 files (if INSERT INTO merges isn't fixed) - -EXPLAIN INSERT INTO TABLE insert_into4b SELECT * FROM insert_into4a +POSTHOOK: query: EXPLAIN INSERT INTO TABLE insert_into4b SELECT * FROM insert_into4a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out b/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out index b62eb6f..7be65a6 100644 --- a/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out +++ b/ql/src/test/results/clientpositive/insert_into_with_schema2.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS; - - - -create table studenttab10k (age2 int) +PREHOOK: query: create table studenttab10k (age2 int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@studenttab10k -POSTHOOK: query: -- SORT_QUERY_RESULTS; - - - -create table studenttab10k (age2 int) +POSTHOOK: query: create table studenttab10k (age2 int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: 
Output: default@studenttab10k diff --git a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out index af410c5..4aded00 100644 --- a/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out +++ b/ql/src/test/results/clientpositive/insert_nonacid_from_acid.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- This test checks that selecting from an acid table and inserting into a non-acid table works. -create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") +PREHOOK: query: create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@sample_06 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- This test checks that selecting from an acid table and inserting into a non-acid table works. 
-create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") +POSTHOOK: query: create table sample_06(name varchar(50), age int, gpa decimal(3, 2)) clustered by (age) into 2 buckets stored as orc TBLPROPERTIES ("transactional"="true") POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@sample_06 diff --git a/ql/src/test/results/clientpositive/interval_1.q.out b/ql/src/test/results/clientpositive/interval_1.q.out index ae2471b..b8746d1 100644 --- a/ql/src/test/results/clientpositive/interval_1.q.out +++ b/ql/src/test/results/clientpositive/interval_1.q.out @@ -36,15 +36,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 10-11 10-11 10-11 10-11 true -PREHOOK: query: -- Test normalization of interval values -select +PREHOOK: query: select interval '49' month from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test normalization of interval values -select +POSTHOOK: query: select interval '49' month from src limit 1 POSTHOOK: type: QUERY @@ -95,8 +93,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 2 01:02:03.000000000 2 01:02:03.000000000 2 01:02:03.000000000 2 01:02:03.000000000 true -PREHOOK: query: -- Test normalization of interval values -select +PREHOOK: query: select interval '49' hour, interval '1470' minute, interval '90061.111111111' second @@ -104,8 +101,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test normalization of interval values -select +POSTHOOK: query: select interval '49' hour, interval '1470' minute, interval '90061.111111111' second diff --git a/ql/src/test/results/clientpositive/interval_2.q.out b/ql/src/test/results/clientpositive/interval_2.q.out index 6c40c53..dd59d5a 100644 
--- a/ql/src/test/results/clientpositive/interval_2.q.out +++ b/ql/src/test/results/clientpositive/interval_2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- group-by/order-by/aggregation functions - -select +PREHOOK: query: select iym, count(*), min(key), max(key), min(iym), max(iym), min(idt), max(idt) from ( select @@ -14,9 +12,7 @@ limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- group-by/order-by/aggregation functions - -select +POSTHOOK: query: select iym, count(*), min(key), max(key), min(iym), max(iym), min(idt), max(idt) from ( select @@ -68,8 +64,7 @@ POSTHOOK: Input: default@src 496-1 1 496 496 496-1 496-1 496 01:01:01.000000000 496 01:01:01.000000000 495-1 1 495 495 495-1 495-1 495 01:01:01.000000000 495 01:01:01.000000000 494-1 1 494 494 494-1 494-1 494 01:01:01.000000000 494 01:01:01.000000000 -PREHOOK: query: -- same query as previous, with having clause -select +PREHOOK: query: select iym, count(*), min(key), max(key), min(iym), max(iym), min(idt), max(idt) from ( select @@ -84,8 +79,7 @@ limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- same query as previous, with having clause -select +POSTHOOK: query: select iym, count(*), min(key), max(key), min(iym), max(iym), min(idt), max(idt) from ( select @@ -169,8 +163,7 @@ POSTHOOK: Input: default@src 496 01:01:01.000000000 1 496 496 496-1 496-1 496 01:01:01.000000000 496 01:01:01.000000000 495 01:01:01.000000000 1 495 495 495-1 495-1 495 01:01:01.000000000 495 01:01:01.000000000 494 01:01:01.000000000 1 494 494 494-1 494-1 494 01:01:01.000000000 494 01:01:01.000000000 -PREHOOK: query: -- same query as previous, with having clause -select +PREHOOK: query: select idt, count(*), min(key), max(key), min(iym), max(iym), min(idt), max(idt) from ( select @@ -185,8 +178,7 @@ limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- 
same query as previous, with having clause -select +POSTHOOK: query: select idt, count(*), min(key), max(key), min(iym), max(iym), min(idt), max(idt) from ( select diff --git a/ql/src/test/results/clientpositive/interval_3.q.out b/ql/src/test/results/clientpositive/interval_3.q.out index 1404d86..ac71514 100644 --- a/ql/src/test/results/clientpositive/interval_3.q.out +++ b/ql/src/test/results/clientpositive/interval_3.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- where clause -select +PREHOOK: query: select l_orderkey, l_shipdate, l_receiptdate from lineitem where (cast(l_shipdate as date) - date '1992-01-01') < interval '365 0:0:0' day to second @@ -7,8 +6,7 @@ order by l_orderkey PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked pattern was here #### -POSTHOOK: query: -- where clause -select +POSTHOOK: query: select l_orderkey, l_shipdate, l_receiptdate from lineitem where (cast(l_shipdate as date) - date '1992-01-01') < interval '365 0:0:0' day to second @@ -59,8 +57,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@lineitem #### A masked pattern was here #### 6 1992-04-27 1992-05-02 -PREHOOK: query: -- joins -select +PREHOOK: query: select a.l_orderkey, b.l_orderkey, a.interval1 from ( @@ -79,8 +76,7 @@ order by a.l_orderkey PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked pattern was here #### -POSTHOOK: query: -- joins -select +POSTHOOK: query: select a.l_orderkey, b.l_orderkey, a.interval1 from ( diff --git a/ql/src/test/results/clientpositive/interval_alt.q.out b/ql/src/test/results/clientpositive/interval_alt.q.out index 8970ede..04d5874 100644 --- a/ql/src/test/results/clientpositive/interval_alt.q.out +++ b/ql/src/test/results/clientpositive/interval_alt.q.out @@ -64,8 +64,7 @@ POSTHOOK: query: insert into t values (1),(2) POSTHOOK: type: QUERY POSTHOOK: Output: default@t POSTHOOK: Lineage: t.dt EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] 
-PREHOOK: query: -- expressions/columnref -explain +PREHOOK: query: explain select date '2012-01-01' + (-dt*dt) day, date '2012-01-01' - interval (-dt*dt) day, @@ -73,8 +72,7 @@ select date '2012-01-01' + (dt || '-1') year to month from t PREHOOK: type: QUERY -POSTHOOK: query: -- expressions/columnref -explain +POSTHOOK: query: explain select date '2012-01-01' + (-dt*dt) day, date '2012-01-01' - interval (-dt*dt) day, diff --git a/ql/src/test/results/clientpositive/interval_arithmetic.q.out b/ql/src/test/results/clientpositive/interval_arithmetic.q.out index 72e602c..64882f8 100644 --- a/ql/src/test/results/clientpositive/interval_arithmetic.q.out +++ b/ql/src/test/results/clientpositive/interval_arithmetic.q.out @@ -18,8 +18,7 @@ POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: default@interval_arithmetic_1 POSTHOOK: Lineage: interval_arithmetic_1.dateval EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] POSTHOOK: Lineage: interval_arithmetic_1.tsval SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:ctimestamp1, type:timestamp, comment:null), ] -PREHOOK: query: -- interval year-month arithmetic -explain +PREHOOK: query: explain select dateval, dateval - interval '2-2' year to month, @@ -31,8 +30,7 @@ select from interval_arithmetic_1 limit 2 PREHOOK: type: QUERY -POSTHOOK: query: -- interval year-month arithmetic -explain +POSTHOOK: query: explain select dateval, dateval - interval '2-2' year to month, @@ -312,8 +310,7 @@ POSTHOOK: Input: default@interval_arithmetic_1 #### A masked pattern was here #### 5-5 -1-1 5-5 -1-1 -PREHOOK: query: -- interval day-time arithmetic -explain +PREHOOK: query: explain select dateval, dateval - interval '99 11:22:33.123456789' day to second, @@ -325,8 +322,7 @@ select from interval_arithmetic_1 limit 2 PREHOOK: type: QUERY -POSTHOOK: query: -- interval day-time arithmetic -explain +POSTHOOK: query: explain select dateval, dateval - interval '99 11:22:33.123456789' day to 
second, diff --git a/ql/src/test/results/clientpositive/interval_comparison.q.out b/ql/src/test/results/clientpositive/interval_comparison.q.out index 455808a..532ee66 100644 --- a/ql/src/test/results/clientpositive/interval_comparison.q.out +++ b/ql/src/test/results/clientpositive/interval_comparison.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- should all be true -select +PREHOOK: query: select i1 = i1, i1 = i2, i1 >= i2, @@ -22,8 +21,7 @@ from ( PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- should all be true -select +POSTHOOK: query: select i1 = i1, i1 = i2, i1 >= i2, @@ -47,8 +45,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true true true true true true true true true true true -PREHOOK: query: -- should all be false -select +PREHOOK: query: select i1 != i1, i1 != i2, i1 < i2, @@ -69,8 +66,7 @@ from ( PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- should all be false -select +POSTHOOK: query: select i1 != i1, i1 != i2, i1 < i2, @@ -92,8 +88,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### false false false false false false false false false -PREHOOK: query: -- should all be true -select +PREHOOK: query: select i1 = i1, i1 = i2, i1 >= i2, @@ -116,8 +111,7 @@ from ( PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- should all be true -select +POSTHOOK: query: select i1 = i1, i1 = i2, i1 >= i2, @@ -141,8 +135,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true true true true true true true true true true true -PREHOOK: query: -- should all be false -select +PREHOOK: query: select i1 != i1, i1 != i2, i1 < i2, @@ -163,8 +156,7 @@ from ( PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- should all be false 
-select +POSTHOOK: query: select i1 != i1, i1 != i2, i1 < i2, diff --git a/ql/src/test/results/clientpositive/join0.q.out b/ql/src/test/results/clientpositive/join0.q.out index 392412d..c02319e 100644 --- a/ql/src/test/results/clientpositive/join0.q.out +++ b/ql/src/test/results/clientpositive/join0.q.out @@ -1,7 +1,5 @@ Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 @@ -9,9 +7,7 @@ SELECT src1.key as k1, src1.value as v1, (SELECT * FROM src WHERE src.key < 10) src2 SORT BY k1, v1, k2, v2 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 diff --git a/ql/src/test/results/clientpositive/join10.q.out b/ql/src/test/results/clientpositive/join10.q.out index 49e9ce3..eb67903 100644 --- a/ql/src/test/results/clientpositive/join10.q.out +++ b/ql/src/test/results/clientpositive/join10.q.out @@ -1,15 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN FROM +PREHOOK: query: EXPLAIN FROM (SELECT src.* FROM src) x JOIN (SELECT src.* FROM src) Y ON (x.key = Y.key) SELECT Y.* PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN FROM +POSTHOOK: query: EXPLAIN FROM (SELECT src.* FROM src) x JOIN (SELECT src.* FROM src) Y diff --git a/ql/src/test/results/clientpositive/join11.q.out b/ql/src/test/results/clientpositive/join11.q.out index bb9493d..4b5e4d0 100644 --- a/ql/src/test/results/clientpositive/join11.q.out +++ b/ql/src/test/results/clientpositive/join11.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 @@ -8,9 
+6,7 @@ JOIN (SELECT src.key as c3, src.value as c4 from src) src2 ON src1.c1 = src2.c3 AND src1.c1 < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 diff --git a/ql/src/test/results/clientpositive/join12.q.out b/ql/src/test/results/clientpositive/join12.q.out index 6a77954..b3d3997 100644 --- a/ql/src/test/results/clientpositive/join12.q.out +++ b/ql/src/test/results/clientpositive/join12.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 @@ -11,9 +9,7 @@ JOIN (SELECT src.key as c5, src.value as c6 from src) src3 ON src1.c1 = src3.c5 AND src3.c5 < 80 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 diff --git a/ql/src/test/results/clientpositive/join13.q.out b/ql/src/test/results/clientpositive/join13.q.out index 8fdd17b..f562d69 100644 --- a/ql/src/test/results/clientpositive/join13.q.out +++ b/ql/src/test/results/clientpositive/join13.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 @@ -11,9 +9,7 @@ JOIN (SELECT src.key as c5, src.value as c6 from src) src3 ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 diff --git a/ql/src/test/results/clientpositive/join14.q.out b/ql/src/test/results/clientpositive/join14.q.out index 24b5a8e..66e42f1 100644 --- a/ql/src/test/results/clientpositive/join14.q.out +++ b/ql/src/test/results/clientpositive/join14.q.out @@ -1,14 +1,8 @@ 
-PREHOOK: query: -- SORT_QUERY_RESULTS --- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/join15.q.out b/ql/src/test/results/clientpositive/join15.q.out index 66c395c..6127b9f 100644 --- a/ql/src/test/results/clientpositive/join15.q.out +++ b/ql/src/test/results/clientpositive/join15.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key) SORT BY src1.key, src1.value, src2.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key) SORT BY src1.key, src1.value, src2.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/join17.q.out b/ql/src/test/results/clientpositive/join17.q.out index 7e0e7e3..6c8ed4f 100644 --- a/ql/src/test/results/clientpositive/join17.q.out +++ b/ql/src/test/results/clientpositive/join17.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key1 INT, value1 
STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/join18.q.out b/ql/src/test/results/clientpositive/join18.q.out index 5e0c0be..3d5a90f 100644 --- a/ql/src/test/results/clientpositive/join18.q.out +++ b/ql/src/test/results/clientpositive/join18.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.key, a.value, b.key, b.value FROM ( @@ -13,9 +11,7 @@ EXPLAIN ) b ON (a.key = b.key) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.key, a.value, b.key, b.value FROM ( diff --git a/ql/src/test/results/clientpositive/join18_multi_distinct.q.out b/ql/src/test/results/clientpositive/join18_multi_distinct.q.out index 228c5b0..b064af2 100644 --- a/ql/src/test/results/clientpositive/join18_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/join18_multi_distinct.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.key, a.value, b.key, b.value1, b.value2 FROM ( @@ -14,9 +12,7 @@ EXPLAIN ) b ON (a.key = b.key) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.key, a.value, b.key, b.value1, b.value2 FROM ( diff --git a/ql/src/test/results/clientpositive/join19.q.out b/ql/src/test/results/clientpositive/join19.q.out index 32591a6..dd167ec 100644 --- a/ql/src/test/results/clientpositive/join19.q.out +++ b/ql/src/test/results/clientpositive/join19.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE triples (foo string, subject string, predicate string, object string, foo2 string) +PREHOOK: query: CREATE TABLE triples (foo string, subject string, predicate string, object string, foo2 
string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@triples -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE triples (foo string, subject string, predicate string, object string, foo2 string) +POSTHOOK: query: CREATE TABLE triples (foo string, subject string, predicate string, object string, foo2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@triples diff --git a/ql/src/test/results/clientpositive/join2.q.out b/ql/src/test/results/clientpositive/join2.q.out index 78ca76e..e3d26a2 100644 --- a/ql/src/test/results/clientpositive/join2.q.out +++ b/ql/src/test/results/clientpositive/join2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j2 diff --git a/ql/src/test/results/clientpositive/join20.q.out b/ql/src/test/results/clientpositive/join20.q.out index 473d6cb..583436d 100644 --- a/ql/src/test/results/clientpositive/join20.q.out +++ b/ql/src/test/results/clientpositive/join20.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key 
AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/join21.q.out b/ql/src/test/results/clientpositive/join21.q.out index 93eff62..29f449e 100644 --- a/ql/src/test/results/clientpositive/join21.q.out +++ b/ql/src/test/results/clientpositive/join21.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/join23.q.out b/ql/src/test/results/clientpositive/join23.q.out index 4144c81..ecef4f8 100644 --- a/ql/src/test/results/clientpositive/join23.q.out +++ b/ql/src/test/results/clientpositive/join23.q.out @@ -1,12 +1,8 @@ Warning: Shuffle Join JOIN[4][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: 
diff --git a/ql/src/test/results/clientpositive/join25.q.out b/ql/src/test/results/clientpositive/join25.q.out index af707aa..5f3f01c 100644 --- a/ql/src/test/results/clientpositive/join25.q.out +++ b/ql/src/test/results/clientpositive/join25.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/join26.q.out b/ql/src/test/results/clientpositive/join26.q.out index 781c0e5..8889804 100644 --- a/ql/src/test/results/clientpositive/join26.q.out +++ b/ql/src/test/results/clientpositive/join26.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/join27.q.out b/ql/src/test/results/clientpositive/join27.q.out index 0c07cb2..fce7d95 100644 --- a/ql/src/test/results/clientpositive/join27.q.out +++ 
b/ql/src/test/results/clientpositive/join27.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/join28.q.out b/ql/src/test/results/clientpositive/join28.q.out index 9aadd28..309bdcd 100644 --- a/ql/src/test/results/clientpositive/join28.q.out +++ b/ql/src/test/results/clientpositive/join28.q.out @@ -1,18 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq.key1, z.value FROM @@ -20,9 +14,7 @@ FROM FROM src1 x JOIN src y ON (x.key = y.key)) subq JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN 
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq.key1, z.value FROM diff --git a/ql/src/test/results/clientpositive/join29.q.out b/ql/src/test/results/clientpositive/join29.q.out index 29dab0e..b53143d 100644 --- a/ql/src/test/results/clientpositive/join29.q.out +++ b/ql/src/test/results/clientpositive/join29.q.out @@ -1,26 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) +PREHOOK: query: CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN diff --git a/ql/src/test/results/clientpositive/join3.q.out b/ql/src/test/results/clientpositive/join3.q.out index ec99ced..fb378f4 100644 --- a/ql/src/test/results/clientpositive/join3.q.out +++ b/ql/src/test/results/clientpositive/join3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: 
type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/join30.q.out b/ql/src/test/results/clientpositive/join30.q.out index ee9c49b..9d29cef 100644 --- a/ql/src/test/results/clientpositive/join30.q.out +++ b/ql/src/test/results/clientpositive/join30.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, cnt INT) +PREHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, cnt INT) +POSTHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/join31.q.out b/ql/src/test/results/clientpositive/join31.q.out index 8bc94fe..32eab4f 100644 --- a/ql/src/test/results/clientpositive/join31.q.out +++ b/ql/src/test/results/clientpositive/join31.q.out @@ -1,27 +1,19 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, cnt INT) +PREHOOK: query: CREATE TABLE dest_j1(key STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, cnt INT) +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE 
dest_j1 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) group by subq1.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out index bebb007..ef4582e 100644 --- a/ql/src/test/results/clientpositive/join32.q.out +++ b/ql/src/test/results/clientpositive/join32.q.out @@ -1,26 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y 
ON (x.key = y.key) diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out index bebb007..ef4582e 100644 --- a/ql/src/test/results/clientpositive/join33.q.out +++ b/ql/src/test/results/clientpositive/join33.q.out @@ -1,26 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) diff --git a/ql/src/test/results/clientpositive/join34.q.out b/ql/src/test/results/clientpositive/join34.q.out index e8f51ea..391f0b4 100644 --- a/ql/src/test/results/clientpositive/join34.q.out +++ b/ql/src/test/results/clientpositive/join34.q.out @@ -1,18 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE 
PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, x.value, subq1.value FROM @@ -22,9 +16,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, x.value, subq1.value FROM diff --git a/ql/src/test/results/clientpositive/join35.q.out b/ql/src/test/results/clientpositive/join35.q.out index e8d69fd..38e0023 100644 --- a/ql/src/test/results/clientpositive/join35.q.out +++ b/ql/src/test/results/clientpositive/join35.q.out @@ -1,18 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE 
dest_j1 SELECT x.key, x.value, subq1.cnt FROM @@ -22,9 +16,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, x.value, subq1.cnt FROM diff --git a/ql/src/test/results/clientpositive/join36.q.out b/ql/src/test/results/clientpositive/join36.q.out index 275860a..206d5ff 100644 --- a/ql/src/test/results/clientpositive/join36.q.out +++ b/ql/src/test/results/clientpositive/join36.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tmp1(key INT, cnt INT) +PREHOOK: query: CREATE TABLE tmp1(key INT, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tmp1(key INT, cnt INT) +POSTHOOK: query: CREATE TABLE tmp1(key INT, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp1 diff --git a/ql/src/test/results/clientpositive/join37.q.out b/ql/src/test/results/clientpositive/join37.q.out index 8820c16..5ee88f7 100644 --- a/ql/src/test/results/clientpositive/join37.q.out +++ b/ql/src/test/results/clientpositive/join37.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git 
a/ql/src/test/results/clientpositive/join39.q.out b/ql/src/test/results/clientpositive/join39.q.out index 02b1837..ee99675 100644 --- a/ql/src/test/results/clientpositive/join39.q.out +++ b/ql/src/test/results/clientpositive/join39.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/join4.q.out b/ql/src/test/results/clientpositive/join4.q.out index eebf398..d9510f5 100644 --- a/ql/src/test/results/clientpositive/join4.q.out +++ b/ql/src/test/results/clientpositive/join4.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/join40.q.out b/ql/src/test/results/clientpositive/join40.q.out index 3234bef..949c8fb 100644 --- 
a/ql/src/test/results/clientpositive/join40.q.out +++ b/ql/src/test/results/clientpositive/join40.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN SELECT x.key, x.value, y.key, y.value +PREHOOK: query: EXPLAIN SELECT x.key, x.value, y.key, y.value FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = y.key) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN SELECT x.key, x.value, y.key, y.value +POSTHOOK: query: EXPLAIN SELECT x.key, x.value, y.key, y.value FROM src x left outer JOIN (select * from src where key <= 100) y ON (x.key = y.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/join41.q.out b/ql/src/test/results/clientpositive/join41.q.out index 1d70e0e..c2eedd0 100644 --- a/ql/src/test/results/clientpositive/join41.q.out +++ b/ql/src/test/results/clientpositive/join41.q.out @@ -87,12 +87,10 @@ POSTHOOK: Input: default@s1 0 val_0 NULL NULL 0 val_0 NULL NULL 0 val_0 NULL NULL -PREHOOK: query: -- Make sure the big table is chosen correctly as part of HIVE-4146 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM s1 src1 LEFT OUTER JOIN s1 src2 ON (src1.key = src2.key AND src2.key > 10) PREHOOK: type: QUERY -POSTHOOK: query: -- Make sure the big table is chosen correctly as part of HIVE-4146 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM s1 src1 LEFT OUTER JOIN s1 src2 ON (src1.key = src2.key AND src2.key > 10) POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/join42.q.out b/ql/src/test/results/clientpositive/join42.q.out index 2d03654..0cd31c6 100644 --- a/ql/src/test/results/clientpositive/join42.q.out +++ b/ql/src/test/results/clientpositive/join42.q.out @@ -87,8 +87,7 @@ POSTHOOK: Lineage: acct.acc_n EXPRESSION [(values__tmp__table__2)values__tmp__ta POSTHOOK: Lineage: acct.aid EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] POSTHOOK: 
Lineage: acct.brn EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col3, type:string, comment:), ] Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: --[HIVE-10841] (WHERE col is not null) does not work sometimes for queries with many JOIN statements -explain select +PREHOOK: query: explain select acct.ACC_N, acct.brn FROM L @@ -101,8 +100,7 @@ WHERE L.id = 4436 and acct.brn is not null PREHOOK: type: QUERY -POSTHOOK: query: --[HIVE-10841] (WHERE col is not null) does not work sometimes for queries with many JOIN statements -explain select +POSTHOOK: query: explain select acct.ACC_N, acct.brn FROM L diff --git a/ql/src/test/results/clientpositive/join44.q.out b/ql/src/test/results/clientpositive/join44.q.out index 4472834..84e44c5 100644 --- a/ql/src/test/results/clientpositive/join44.q.out +++ b/ql/src/test/results/clientpositive/join44.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE mytable(val1 INT, val2 INT, val3 INT) +PREHOOK: query: CREATE TABLE mytable(val1 INT, val2 INT, val3 INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@mytable -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE mytable(val1 INT, val2 INT, val3 INT) +POSTHOOK: query: CREATE TABLE mytable(val1 INT, val2 INT, val3 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@mytable diff --git a/ql/src/test/results/clientpositive/join45.q.out b/ql/src/test/results/clientpositive/join45.q.out index 18a7876..249fe9c 100644 --- a/ql/src/test/results/clientpositive/join45.q.out +++ b/ql/src/test/results/clientpositive/join45.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Conjunction with pred on multiple inputs and single inputs -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key=src.key @@ -7,8 +6,7 @@ ON (src1.key=src.key AND src.value between 100 and 
102) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Conjunction with pred on multiple inputs and single inputs -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key=src.key @@ -108,15 +106,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### -PREHOOK: query: -- Conjunction with pred on multiple inputs and none -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key=src.key AND true) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Conjunction with pred on multiple inputs and none -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key=src.key AND true) @@ -215,8 +211,7 @@ POSTHOOK: Input: default@src1 224 224 val_224 224 224 val_224 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- Conjunction with pred on single inputs and none -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.value between 100 and 102 @@ -224,8 +219,7 @@ ON (src1.value between 100 and 102 AND true) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Conjunction with pred on single inputs and none -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.value between 100 and 102 @@ -317,8 +311,7 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- Disjunction with pred on multiple inputs and single inputs -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key=src.key @@ -326,8 +319,7 @@ ON (src1.key=src.key OR src.value between 100 and 102) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Disjunction with pred on multiple inputs and single inputs -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key=src.key @@ -426,16 +418,14 @@ POSTHOOK: Input: 
default@src1 128 128 val_128 128 128 val_128 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- Conjunction with multiple inputs on one side -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key+src.key >= 100 AND src1.key+src.key <= 102) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Conjunction with multiple inputs on one side -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key+src.key >= 100 @@ -527,16 +517,14 @@ POSTHOOK: Input: default@src1 98 val_98 2 val_2 98 val_98 4 val_4 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- Disjunction with multiple inputs on one side -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key+src.key >= 100 OR src1.key+src.key <= 102) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Disjunction with multiple inputs on one side -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON (src1.key+src.key >= 100 @@ -632,15 +620,13 @@ POSTHOOK: Input: default@src1 406 val_406 477 val_477 406 val_406 414 val_414 Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- Function with multiple inputs on one side -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102))) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Function with multiple inputs on one side -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src1 JOIN src ON ((src1.key,src.key) IN ((100,100),(101,101),(102,102))) @@ -723,16 +709,14 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- Chained 1 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src 
JOIN src1 a ON (a.key+src.key >= 100) LEFT OUTER JOIN src1 b ON (b.key = src.key) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Chained 1 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src JOIN src1 a ON (a.key+src.key >= 100) @@ -872,16 +856,14 @@ POSTHOOK: Input: default@src1 98 val_98 150 val_150 98 val_98 98 val_98 401 val_401 98 val_98 Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- Chained 2 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src LEFT OUTER JOIN src1 a ON (a.key = src.key) JOIN src1 b ON (b.key+src.key<= 102) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Chained 2 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src LEFT OUTER JOIN src1 a ON (a.key = src.key) @@ -1017,16 +999,14 @@ POSTHOOK: Input: default@src1 35 val_35 NULL NULL 66 val_66 35 val_35 NULL NULL 66 val_66 Warning: Shuffle Join JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- Chained 3 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src JOIN src1 a ON (a.key+src.key >= 100) RIGHT OUTER JOIN src1 b ON (b.key = src.key) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Chained 3 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src JOIN src1 a ON (a.key+src.key >= 100) @@ -1162,16 +1142,14 @@ NULL NULL NULL NULL NULL NULL NULL NULL val_193 NULL NULL NULL NULL val_265 Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- Chained 4 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src RIGHT OUTER JOIN src1 a ON (a.key = src.key) JOIN src1 b ON (b.key+src.key<= 102) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Chained 4 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src RIGHT OUTER JOIN src1 a ON (a.key = src.key) @@ -1297,16 +1275,14 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### Warning: Shuffle Join 
JOIN[8][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- Chained 5 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src JOIN src1 a ON (a.key+src.key >= 100) FULL OUTER JOIN src1 b ON (b.key = src.key) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Chained 5 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src JOIN src1 a ON (a.key+src.key >= 100) @@ -1442,16 +1418,14 @@ NULL NULL NULL NULL NULL NULL NULL NULL val_193 NULL NULL NULL NULL val_265 Warning: Shuffle Join JOIN[11][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- Chained 6 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src FULL OUTER JOIN src1 a ON (a.key = src.key) JOIN src1 b ON (b.key+src.key<= 102) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Chained 6 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src FULL OUTER JOIN src1 a ON (a.key = src.key) @@ -1587,8 +1561,7 @@ POSTHOOK: Input: default@src1 35 val_35 NULL NULL 66 val_66 35 val_35 NULL NULL 66 val_66 Warning: Shuffle Join JOIN[18][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- Right outer join with multiple inner joins and mixed conditions -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM cbo_t1 t1 RIGHT OUTER JOIN cbo_t1 t2 ON (t2.key = t1.key) @@ -1597,8 +1570,7 @@ JOIN cbo_t1 t4 ON (t4.key = t2.key or t2.c_float = t4.c_float and t4.value = t2 JOIN cbo_t1 t5 ON (t5.key = t2.key or t2.c_boolean = t4.c_boolean and t5.c_int = 42) LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Right outer join with multiple inner joins and mixed conditions -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM cbo_t1 t1 RIGHT OUTER JOIN cbo_t1 t2 ON (t2.key = t1.key) diff --git a/ql/src/test/results/clientpositive/join5.q.out b/ql/src/test/results/clientpositive/join5.q.out index e4d514f..bf1cde3 100644 --- a/ql/src/test/results/clientpositive/join5.q.out +++ 
b/ql/src/test/results/clientpositive/join5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/join6.q.out b/ql/src/test/results/clientpositive/join6.q.out index 1f591c4..77c8c3a 100644 --- a/ql/src/test/results/clientpositive/join6.q.out +++ b/ql/src/test/results/clientpositive/join6.q.out @@ -6,9 +6,7 @@ POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM ( @@ -23,9 +21,7 @@ FROM ( ) c INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM ( diff --git a/ql/src/test/results/clientpositive/join7.q.out b/ql/src/test/results/clientpositive/join7.q.out index f38fabd..f0f437c 100644 --- a/ql/src/test/results/clientpositive/join7.q.out +++ b/ql/src/test/results/clientpositive/join7.q.out @@ -6,9 +6,7 @@ POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM ( @@ -28,9 +26,7 @@ FROM ( ) c INSERT OVERWRITE TABLE dest1 SELECT c.c1, 
c.c2, c.c3, c.c4, c.c5, c.c6 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM ( diff --git a/ql/src/test/results/clientpositive/join8.q.out b/ql/src/test/results/clientpositive/join8.q.out index 47b821a..c1035b4 100644 --- a/ql/src/test/results/clientpositive/join8.q.out +++ b/ql/src/test/results/clientpositive/join8.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/join9.q.out b/ql/src/test/results/clientpositive/join9.q.out index efddb5d..af30007 100644 --- a/ql/src/test/results/clientpositive/join9.q.out +++ b/ql/src/test/results/clientpositive/join9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/join_1to1.q.out b/ql/src/test/results/clientpositive/join_1to1.q.out index b1fc8f0..1ccb613 100644 --- 
a/ql/src/test/results/clientpositive/join_1to1.q.out +++ b/ql/src/test/results/clientpositive/join_1to1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE join_1to1_1(key1 int, key2 int, value int) +PREHOOK: query: CREATE TABLE join_1to1_1(key1 int, key2 int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@join_1to1_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE join_1to1_1(key1 int, key2 int, value int) +POSTHOOK: query: CREATE TABLE join_1to1_1(key1 int, key2 int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@join_1to1_1 diff --git a/ql/src/test/results/clientpositive/join_array.q.out b/ql/src/test/results/clientpositive/join_array.q.out index 56a4360..66c143a 100644 --- a/ql/src/test/results/clientpositive/join_array.q.out +++ b/ql/src/test/results/clientpositive/join_array.q.out @@ -30,15 +30,11 @@ POSTHOOK: query: load data local inpath '../../data/files/tiny_b.txt' into table POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tinyb -PREHOOK: query: -- SORT_QUERY_RESULTS - -select * from tinyA +PREHOOK: query: select * from tinyA PREHOOK: type: QUERY PREHOOK: Input: default@tinya #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select * from tinyA +POSTHOOK: query: select * from tinyA POSTHOOK: type: QUERY POSTHOOK: Input: default@tinya #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/join_casesensitive.q.out b/ql/src/test/results/clientpositive/join_casesensitive.q.out index 4978b88..f9bc3c0 100644 --- a/ql/src/test/results/clientpositive/join_casesensitive.q.out +++ b/ql/src/test/results/clientpositive/join_casesensitive.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE joinone(key1 int, key2 int, value int) +PREHOOK: query: CREATE TABLE joinone(key1 int, key2 int, value int) PREHOOK: type: 
CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@joinone -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE joinone(key1 int, key2 int, value int) +POSTHOOK: query: CREATE TABLE joinone(key1 int, key2 int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@joinone diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out index 985c645..b72220d 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual5.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- outer join is not qualified for pushing down of where to join condition -CREATE TABLE ltable (index int, la int, lk1 string, lk2 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' +PREHOOK: query: CREATE TABLE ltable (index int, la int, lk1 string, lk2 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ltable -POSTHOOK: query: -- outer join is not qualified for pushing down of where to join condition -CREATE TABLE ltable (index int, la int, lk1 string, lk2 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' +POSTHOOK: query: CREATE TABLE ltable (index int, la int, lk1 string, lk2 string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@ltable diff --git a/ql/src/test/results/clientpositive/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/join_filters_overlap.q.out index efcee84..8515b87 100644 --- a/ql/src/test/results/clientpositive/join_filters_overlap.q.out +++ b/ql/src/test/results/clientpositive/join_filters_overlap.q.out @@ -1,26 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- HIVE-3411 Filter predicates on outer join overlapped on single alias is not handled properly - 
-create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 +PREHOOK: query: create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@a -POSTHOOK: query: -- SORT_QUERY_RESULTS --- HIVE-3411 Filter predicates on outer join overlapped on single alias is not handled properly - -create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 +POSTHOOK: query: create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@a POSTHOOK: Lineage: a.key SIMPLE [] POSTHOOK: Lineage: a.value SCRIPT [] -PREHOOK: query: -- overlap on a -explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +PREHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: -- overlap on a -explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +POSTHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -207,11 +199,9 @@ POSTHOOK: Input: default@a 100 40 NULL NULL NULL NULL 100 50 100 50 NULL NULL 100 60 NULL NULL 100 60 -PREHOOK: query: -- 
overlap on b -explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +PREHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: -- overlap on b -explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +POSTHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -398,11 +388,9 @@ POSTHOOK: Input: default@a 100 50 100 50 NULL NULL NULL NULL 100 40 NULL NULL NULL NULL 100 60 100 60 -PREHOOK: query: -- overlap on b with two filters for each -explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +PREHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: -- overlap on b with two filters for each -explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +POSTHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root 
stage @@ -589,11 +577,9 @@ POSTHOOK: Input: default@a 100 50 100 50 NULL NULL NULL NULL 100 40 NULL NULL NULL NULL 100 60 100 60 -PREHOOK: query: -- overlap on a, b -explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) PREHOOK: type: QUERY -POSTHOOK: query: -- overlap on a, b -explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +POSTHOOK: query: explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -792,11 +778,9 @@ POSTHOOK: Input: default@a 100 60 NULL NULL NULL NULL NULL NULL NULL NULL 100 40 NULL NULL NULL NULL NULL NULL 100 60 100 60 NULL NULL -PREHOOK: query: -- triple overlap on a -explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) PREHOOK: type: QUERY -POSTHOOK: query: -- triple 
overlap on a -explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +POSTHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/join_literals.q.out b/ql/src/test/results/clientpositive/join_literals.q.out index eab2085..e2655d1 100644 --- a/ql/src/test/results/clientpositive/join_literals.q.out +++ b/ql/src/test/results/clientpositive/join_literals.q.out @@ -1,13 +1,9 @@ WARNING: Comparing a bigint and a string may result in a loss of precision. -PREHOOK: query: -- Test Joins with a variety of literals in the on clause - -SELECT COUNT(*) FROM src a JOIN src b ON a.key = b.key AND a.key = 0L +PREHOOK: query: SELECT COUNT(*) FROM src a JOIN src b ON a.key = b.key AND a.key = 0L PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test Joins with a variety of literals in the on clause - -SELECT COUNT(*) FROM src a JOIN src b ON a.key = b.key AND a.key = 0L +POSTHOOK: query: SELECT COUNT(*) FROM src a JOIN src b ON a.key = b.key AND a.key = 0L POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/join_map_ppr.q.out b/ql/src/test/results/clientpositive/join_map_ppr.q.out index e44ceac..8877672 100644 --- a/ql/src/test/results/clientpositive/join_map_ppr.q.out +++ b/ql/src/test/results/clientpositive/join_map_ppr.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE 
+PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out index a212dcf..e22d6fe 100644 --- a/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out +++ b/ql/src/test/results/clientpositive/join_merge_multi_expressions.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -explain +PREHOOK: query: explain select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS -explain +POSTHOOK: query: explain select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/join_on_varchar.q.out b/ql/src/test/results/clientpositive/join_on_varchar.q.out index 6939fe7..ace445d 100644 --- a/ql/src/test/results/clientpositive/join_on_varchar.q.out +++ b/ql/src/test/results/clientpositive/join_on_varchar.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table tbl1(c1 varchar(10), intcol int) +PREHOOK: query: create table tbl1(c1 varchar(10), intcol int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table tbl1(c1 varchar(10), intcol int) +POSTHOOK: query: create table tbl1(c1 
varchar(10), intcol int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 diff --git a/ql/src/test/results/clientpositive/join_rc.q.out b/ql/src/test/results/clientpositive/join_rc.q.out index c18c850..a15231b 100644 --- a/ql/src/test/results/clientpositive/join_rc.q.out +++ b/ql/src/test/results/clientpositive/join_rc.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table join_rc1(key string, value string) stored as RCFile +PREHOOK: query: create table join_rc1(key string, value string) stored as RCFile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@join_rc1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table join_rc1(key string, value string) stored as RCFile +POSTHOOK: query: create table join_rc1(key string, value string) stored as RCFile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@join_rc1 diff --git a/ql/src/test/results/clientpositive/join_reorder.q.out b/ql/src/test/results/clientpositive/join_reorder.q.out index 6091a5f..5cef0c9 100644 --- a/ql/src/test/results/clientpositive/join_reorder.q.out +++ b/ql/src/test/results/clientpositive/join_reorder.q.out @@ -46,14 +46,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key +PREHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key SELECT a.key, a.val, c.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key +POSTHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key SELECT a.key, a.val, c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/join_thrift.q.out b/ql/src/test/results/clientpositive/join_thrift.q.out index 97b55c2..b05d0e3 100644 --- 
a/ql/src/test/results/clientpositive/join_thrift.q.out +++ b/ql/src/test/results/clientpositive/join_thrift.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DESCRIBE src_thrift +PREHOOK: query: DESCRIBE src_thrift PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DESCRIBE src_thrift +POSTHOOK: query: DESCRIBE src_thrift POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src_thrift aint int from deserializer diff --git a/ql/src/test/results/clientpositive/join_vc.q.out b/ql/src/test/results/clientpositive/join_vc.q.out index f37a8b3..62671f2 100644 --- a/ql/src/test/results/clientpositive/join_vc.q.out +++ b/ql/src/test/results/clientpositive/join_vc.q.out @@ -1,14 +1,6 @@ -PREHOOK: query: -- see HIVE-4033 earlier a flag named hasVC was not initialized correctly in MapOperator.java, resulting in NPE for following query. order by and limit in the query is not relevant, problem would be evident even without those. They are there to keep .q.out file small and sorted. - --- SORT_QUERY_RESULTS - -explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3 +PREHOOK: query: explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3 PREHOOK: type: QUERY -POSTHOOK: query: -- see HIVE-4033 earlier a flag named hasVC was not initialized correctly in MapOperator.java, resulting in NPE for following query. order by and limit in the query is not relevant, problem would be evident even without those. They are there to keep .q.out file small and sorted. 
- --- SORT_QUERY_RESULTS - -explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3 +POSTHOOK: query: explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/join_view.q.out b/ql/src/test/results/clientpositive/join_view.q.out index b1bfb00..394706d 100644 --- a/ql/src/test/results/clientpositive/join_view.q.out +++ b/ql/src/test/results/clientpositive/join_view.q.out @@ -22,17 +22,13 @@ POSTHOOK: query: create table invites2 (foo int, bar string) partitioned by (ds POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@invites2 -PREHOOK: query: -- test join views: see HIVE-1989 - -create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds +PREHOOK: query: create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds PREHOOK: type: CREATEVIEW PREHOOK: Input: default@invites PREHOOK: Input: default@invites2 PREHOOK: Output: database:default PREHOOK: Output: default@v -POSTHOOK: query: -- test join views: see HIVE-1989 - -create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds +POSTHOOK: query: create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@invites POSTHOOK: Input: default@invites2 diff --git a/ql/src/test/results/clientpositive/keyword_1.q.out b/ql/src/test/results/clientpositive/keyword_1.q.out index e215642..621a739 100644 --- 
a/ql/src/test/results/clientpositive/keyword_1.q.out +++ b/ql/src/test/results/clientpositive/keyword_1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -create table test_user (`user` string, `group` string) +PREHOOK: query: create table test_user (`user` string, `group` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_user -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table test_user (`user` string, `group` string) +POSTHOOK: query: create table test_user (`user` string, `group` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_user diff --git a/ql/src/test/results/clientpositive/lateral_view_cp.q.out b/ql/src/test/results/clientpositive/lateral_view_cp.q.out index bf0ae96..2024d25 100644 --- a/ql/src/test/results/clientpositive/lateral_view_cp.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_cp.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: --HIVE 3226 -drop table array_valued_src +PREHOOK: query: drop table array_valued_src PREHOOK: type: DROPTABLE -POSTHOOK: query: --HIVE 3226 -drop table array_valued_src +POSTHOOK: query: drop table array_valued_src POSTHOOK: type: DROPTABLE PREHOOK: query: create table array_valued_src (key string, value array) PREHOOK: type: CREATETABLE @@ -22,11 +20,9 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@array_valued_src POSTHOOK: Lineage: array_valued_src.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: array_valued_src.value EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- replace sel(*) to sel(exprs) for reflecting CP result properly -explain select count(val) from (select a.key as key, b.value as array_val from src a join array_valued_src b on a.key=b.key) i lateral view explode (array_val) c as val +PREHOOK: query: explain select count(val) from (select a.key as key, b.value as array_val from src a join 
array_valued_src b on a.key=b.key) i lateral view explode (array_val) c as val PREHOOK: type: QUERY -POSTHOOK: query: -- replace sel(*) to sel(exprs) for reflecting CP result properly -explain select count(val) from (select a.key as key, b.value as array_val from src a join array_valued_src b on a.key=b.key) i lateral view explode (array_val) c as val +POSTHOOK: query: explain select count(val) from (select a.key as key, b.value as array_val from src a join array_valued_src b on a.key=b.key) i lateral view explode (array_val) c as val POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/lateral_view_noalias.q.out b/ql/src/test/results/clientpositive/lateral_view_noalias.q.out index e8d23d4..47b1249 100644 --- a/ql/src/test/results/clientpositive/lateral_view_noalias.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_noalias.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: --HIVE-2608 Do not require AS a,b,c part in LATERAL VIEW -EXPLAIN SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 +PREHOOK: query: EXPLAIN SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 PREHOOK: type: QUERY -POSTHOOK: query: --HIVE-2608 Do not require AS a,b,c part in LATERAL VIEW -EXPLAIN SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 +POSTHOOK: query: EXPLAIN SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -100,14 +98,12 @@ POSTHOOK: Input: default@src #### A masked pattern was here #### key1 100 key2 200 -PREHOOK: query: -- view -create view lv_noalias as SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 +PREHOOK: query: create view lv_noalias as SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 PREHOOK: type: CREATEVIEW 
PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@lv_noalias -POSTHOOK: query: -- view -create view lv_noalias as SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 +POSTHOOK: query: create view lv_noalias as SELECT myTab.* from src LATERAL VIEW explode(map('key1', 100, 'key2', 200)) myTab limit 2 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/lateral_view_onview.q.out b/ql/src/test/results/clientpositive/lateral_view_onview.q.out index 015c701..6ce31f4 100644 --- a/ql/src/test/results/clientpositive/lateral_view_onview.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_onview.q.out @@ -461,27 +461,23 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Verify that * selects columns from both tables -SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1 +PREHOOK: query: SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@lv_table PREHOOK: Input: default@lv_view #### A masked pattern was here #### -POSTHOOK: query: -- Verify that * selects columns from both tables -SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1 +POSTHOOK: query: SELECT * FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol SORT BY c1 ASC, myCol ASC LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@lv_table POSTHOOK: Input: default@lv_view #### A masked pattern was here #### abc [1,2,3] 100 t 1 -PREHOOK: query: -- TABLE.* should be supported -SELECT myTable.* FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3 +PREHOOK: query: SELECT myTable.* FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3 PREHOOK: type: QUERY PREHOOK: Input: default@lv_table 
PREHOOK: Input: default@lv_view #### A masked pattern was here #### -POSTHOOK: query: -- TABLE.* should be supported -SELECT myTable.* FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3 +POSTHOOK: query: SELECT myTable.* FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LIMIT 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@lv_table POSTHOOK: Input: default@lv_view @@ -489,14 +485,12 @@ POSTHOOK: Input: default@lv_view 1 2 3 -PREHOOK: query: -- Multiple lateral views should result in a Cartesian product -SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9 +PREHOOK: query: SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9 PREHOOK: type: QUERY PREHOOK: Input: default@lv_table PREHOOK: Input: default@lv_view #### A masked pattern was here #### -POSTHOOK: query: -- Multiple lateral views should result in a Cartesian product -SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9 +POSTHOOK: query: SELECT myTable.myCol, myTable2.myCol2 FROM lv_view LATERAL VIEW explode(array(1,2,3)) myTable AS myCol LATERAL VIEW explode(array('a', 'b', 'c')) myTable2 AS myCol2 LIMIT 9 POSTHOOK: type: QUERY POSTHOOK: Input: default@lv_table POSTHOOK: Input: default@lv_view @@ -510,14 +504,12 @@ POSTHOOK: Input: default@lv_view 3 a 3 b 3 c -PREHOOK: query: -- Should be able to reference tables generated earlier -SELECT myTable2.* FROM lv_view LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3 +PREHOOK: query: SELECT myTable2.* FROM lv_view LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW 
explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3 PREHOOK: type: QUERY PREHOOK: Input: default@lv_table PREHOOK: Input: default@lv_view #### A masked pattern was here #### -POSTHOOK: query: -- Should be able to reference tables generated earlier -SELECT myTable2.* FROM lv_view LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3 +POSTHOOK: query: SELECT myTable2.* FROM lv_view LATERAL VIEW explode(array(array(1,2,3))) myTable AS myCol LATERAL VIEW explode(myTable.myCol) myTable2 AS myCol2 LIMIT 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@lv_table POSTHOOK: Input: default@lv_view diff --git a/ql/src/test/results/clientpositive/lateral_view_outer.q.out b/ql/src/test/results/clientpositive/lateral_view_outer.q.out index 994945a..342ed4b 100644 --- a/ql/src/test/results/clientpositive/lateral_view_outer.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_outer.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- UDTF forwards nothing, OUTER LV add null for that -explain +PREHOOK: query: explain select * from src LATERAL VIEW OUTER explode(array()) C AS a limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- UDTF forwards nothing, OUTER LV add null for that -explain +POSTHOOK: query: explain select * from src LATERAL VIEW OUTER explode(array()) C AS a limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -90,12 +88,10 @@ POSTHOOK: Input: default@src 278 val_278 NULL 98 val_98 NULL 484 val_484 NULL -PREHOOK: query: -- backward compatible (UDTF forwards something for OUTER LV) -explain +PREHOOK: query: explain select * from src LATERAL VIEW OUTER explode(array(4,5)) C AS a limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- backward compatible (UDTF forwards something for OUTER LV) -explain +POSTHOOK: query: explain select * from src LATERAL VIEW OUTER explode(array(4,5)) C AS a limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/lateral_view_ppd.q.out 
b/ql/src/test/results/clientpositive/lateral_view_ppd.q.out index 259985d..2e3adab 100644 --- a/ql/src/test/results/clientpositive/lateral_view_ppd.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_ppd.q.out @@ -413,11 +413,9 @@ val_0 2 val_0 3 val_0 3 val_0 3 -PREHOOK: query: -- HIVE-4293 Predicates following UDTF operator are removed by PPD -EXPLAIN SELECT value, myCol FROM (SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol WHERE myCol > 1) a WHERE key='0' +PREHOOK: query: EXPLAIN SELECT value, myCol FROM (SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol WHERE myCol > 1) a WHERE key='0' PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-4293 Predicates following UDTF operator are removed by PPD -EXPLAIN SELECT value, myCol FROM (SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol WHERE myCol > 1) a WHERE key='0' +POSTHOOK: query: EXPLAIN SELECT value, myCol FROM (SELECT * FROM src LATERAL VIEW explode(array(1,2,3)) myTable AS myCol WHERE myCol > 1) a WHERE key='0' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/lb_fs_stats.q.out b/ql/src/test/results/clientpositive/lb_fs_stats.q.out index 8344125..b07192b 100644 --- a/ql/src/test/results/clientpositive/lb_fs_stats.q.out +++ b/ql/src/test/results/clientpositive/lb_fs_stats.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- Tests truncating a column from a list bucketing table - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - -CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a column from a list bucketing table - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - -CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED 
BY (part STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab diff --git a/ql/src/test/results/clientpositive/leadlag.q.out b/ql/src/test/results/clientpositive/leadlag.q.out index 86718ae..2705082 100644 --- a/ql/src/test/results/clientpositive/leadlag.q.out +++ b/ql/src/test/results/clientpositive/leadlag.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: --1. testLagWithPTFWindowing -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_retailprice, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1, @@ -11,8 +10,7 @@ order by p_name PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: --1. testLagWithPTFWindowing -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_retailprice, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1, @@ -50,8 +48,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 1611.66 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 2. 
testLagWithWindowingNoPTF -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_retailprice, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1, @@ -60,8 +57,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 2. testLagWithWindowingNoPTF -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, p_retailprice, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1, @@ -96,15 +92,13 @@ Manufacturer#5 almond antique medium spring khaki 2 2 1611.66 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 3. testJoinWithLag -select p1.p_mfgr, p1.p_name, +PREHOOK: query: select p1.p_mfgr, p1.p_name, p1.p_size, p1.p_size - lag(p1.p_size,1,p1.p_size) over( distribute by p1.p_mfgr sort by p1.p_name) as deltaSz from part p1 join part p2 on p1.p_partkey = p2.p_partkey PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 3. 
testJoinWithLag -select p1.p_mfgr, p1.p_name, +POSTHOOK: query: select p1.p_mfgr, p1.p_name, p1.p_size, p1.p_size - lag(p1.p_size,1,p1.p_size) over( distribute by p1.p_mfgr sort by p1.p_name) as deltaSz from part p1 join part p2 on p1.p_partkey = p2.p_partkey POSTHOOK: type: QUERY @@ -138,16 +132,14 @@ Manufacturer#5 almond antique medium spring khaki 6 -25 Manufacturer#5 almond antique sky peru orange 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 -23 -PREHOOK: query: -- 4. testLagInSum -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_size - lag(p_size,1)) over(distribute by p_mfgr sort by p_name ) as deltaSum from part window w1 as (rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 4. testLagInSum -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_size - lag(p_size,1)) over(distribute by p_mfgr sort by p_name ) as deltaSum from part window w1 as (rows between 2 preceding and 2 following) @@ -180,16 +172,14 @@ Manufacturer#5 almond antique medium spring khaki 6 -25 Manufacturer#5 almond antique sky peru orange 2 -29 Manufacturer#5 almond aquamarine dodger light gainsboro 46 15 Manufacturer#5 almond azure blanched chiffon midnight 23 -8 -PREHOOK: query: -- 5. testLagInSumOverWindow -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_size - lag(p_size,1)) over w1 as deltaSum from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 5. 
testLagInSumOverWindow -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_size - lag(p_size,1)) over w1 as deltaSum from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) @@ -222,10 +212,7 @@ Manufacturer#5 almond antique medium spring khaki 6 15 Manufacturer#5 almond antique sky peru orange 2 -8 Manufacturer#5 almond aquamarine dodger light gainsboro 46 17 Manufacturer#5 almond azure blanched chiffon midnight 23 21 -PREHOOK: query: -- 6. testRankInLead --- disable cbo because of CALCITE-653 - -select p_mfgr, p_name, p_size, r1, +PREHOOK: query: select p_mfgr, p_name, p_size, r1, lead(r1,1,r1) over (distribute by p_mfgr sort by p_name) as deltaRank from ( select p_mfgr, p_name, p_size, @@ -235,10 +222,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 6. testRankInLead --- disable cbo because of CALCITE-653 - -select p_mfgr, p_name, p_size, r1, +POSTHOOK: query: select p_mfgr, p_name, p_size, r1, lead(r1,1,r1) over (distribute by p_mfgr sort by p_name) as deltaRank from ( select p_mfgr, p_name, p_size, @@ -274,8 +258,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 3 Manufacturer#5 almond antique sky peru orange 2 3 4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 5 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 -PREHOOK: query: -- 7. testLeadWithPTF -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lead(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -286,8 +269,7 @@ order by p_name PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 7. 
testLeadWithPTF -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lead(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -324,8 +306,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 6 4 Manufacturer#5 almond antique sky peru orange 3 3 2 -44 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 23 Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 0 -PREHOOK: query: -- 8. testOverNoPartitionMultipleAggregate -select p_name, p_retailprice, +PREHOOK: query: select p_name, p_retailprice, lead(p_retailprice) over() as l1 , lag(p_retailprice) over() as l2 from part @@ -333,8 +314,7 @@ where p_retailprice = 1173.15 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 8. testOverNoPartitionMultipleAggregate -select p_name, p_retailprice, +POSTHOOK: query: select p_name, p_retailprice, lead(p_retailprice) over() as l1 , lag(p_retailprice) over() as l2 from part diff --git a/ql/src/test/results/clientpositive/leadlag_queries.q.out b/ql/src/test/results/clientpositive/leadlag_queries.q.out index 56c528f..c8d23e0 100644 --- a/ql/src/test/results/clientpositive/leadlag_queries.q.out +++ b/ql/src/test/results/clientpositive/leadlag_queries.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- 1. testLeadUDAF -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lead(p_retailprice) over (partition by p_mfgr order by p_name) as l1, lead(p_retailprice,1) over (partition by p_mfgr order by p_name) as l2, lead(p_retailprice,1,10) over (partition by p_mfgr order by p_name) as l3, @@ -9,8 +8,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 1. 
testLeadUDAF -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lead(p_retailprice) over (partition by p_mfgr order by p_name) as l1, lead(p_retailprice,1) over (partition by p_mfgr order by p_name) as l2, lead(p_retailprice,1,10) over (partition by p_mfgr order by p_name) as l3, @@ -46,16 +44,14 @@ Manufacturer#5 1611.66 1788.73 1788.73 1788.73 1788.73 -177.06999999999994 Manufacturer#5 1788.73 1018.1 1018.1 1018.1 1018.1 770.63 Manufacturer#5 1018.1 1464.48 1464.48 1464.48 1464.48 -446.38 Manufacturer#5 1464.48 NULL NULL 10.0 1464.48 0.0 -PREHOOK: query: -- 2.testLeadUDAFPartSz1 -select p_mfgr, p_name, p_retailprice, +PREHOOK: query: select p_mfgr, p_name, p_retailprice, lead(p_retailprice,1) over (partition by p_mfgr, p_name ), p_retailprice - lead(p_retailprice,1,p_retailprice) over (partition by p_mfgr, p_name) from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 2.testLeadUDAFPartSz1 -select p_mfgr, p_name, p_retailprice, +POSTHOOK: query: select p_mfgr, p_name, p_retailprice, lead(p_retailprice,1) over (partition by p_mfgr, p_name ), p_retailprice - lead(p_retailprice,1,p_retailprice) over (partition by p_mfgr, p_name) from part @@ -88,8 +84,7 @@ Manufacturer#5 almond antique medium spring khaki 1611.66 NULL 0.0 Manufacturer#5 almond antique sky peru orange 1788.73 NULL 0.0 Manufacturer#5 almond aquamarine dodger light gainsboro 1018.1 NULL 0.0 Manufacturer#5 almond azure blanched chiffon midnight 1464.48 NULL 0.0 -PREHOOK: query: -- 3.testLagUDAF -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice,1) over (partition by p_mfgr order by p_name) as l1, lag(p_retailprice) over (partition by p_mfgr order by p_name) as l2, lag(p_retailprice,1, p_retailprice) over (partition by p_mfgr order by p_name) as l3, @@ -99,8 +94,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: 
query: -- 3.testLagUDAF -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice,1) over (partition by p_mfgr order by p_name) as l1, lag(p_retailprice) over (partition by p_mfgr order by p_name) as l2, lag(p_retailprice,1, p_retailprice) over (partition by p_mfgr order by p_name) as l3, @@ -136,16 +130,14 @@ Manufacturer#5 1611.66 1789.69 1789.69 1789.69 1789.69 -178.02999999999997 Manufacturer#5 1788.73 1611.66 1611.66 1611.66 1611.66 177.06999999999994 Manufacturer#5 1018.1 1788.73 1788.73 1788.73 1788.73 -770.63 Manufacturer#5 1464.48 1018.1 1018.1 1018.1 1018.1 446.38 -PREHOOK: query: -- 4.testLagUDAFPartSz1 -select p_mfgr, p_name, p_retailprice, +PREHOOK: query: select p_mfgr, p_name, p_retailprice, lag(p_retailprice,1) over (partition by p_mfgr, p_name ), p_retailprice - lag(p_retailprice,1,p_retailprice) over (partition by p_mfgr, p_name) from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 4.testLagUDAFPartSz1 -select p_mfgr, p_name, p_retailprice, +POSTHOOK: query: select p_mfgr, p_name, p_retailprice, lag(p_retailprice,1) over (partition by p_mfgr, p_name ), p_retailprice - lag(p_retailprice,1,p_retailprice) over (partition by p_mfgr, p_name) from part @@ -178,8 +170,7 @@ Manufacturer#5 almond antique medium spring khaki 1611.66 NULL 0.0 Manufacturer#5 almond antique sky peru orange 1788.73 NULL 0.0 Manufacturer#5 almond aquamarine dodger light gainsboro 1018.1 NULL 0.0 Manufacturer#5 almond azure blanched chiffon midnight 1464.48 NULL 0.0 -PREHOOK: query: -- 5.testLeadLagUDAF -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lead(p_retailprice,1) over (partition by p_mfgr order by p_name) as l1, lead(p_retailprice,1, p_retailprice) over (partition by p_mfgr order by p_name) as l2, p_retailprice - lead(p_retailprice,1,p_retailprice) over (partition by p_mfgr order by p_name), @@ -189,8 +180,7 @@ from part PREHOOK: type: 
QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 5.testLeadLagUDAF -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lead(p_retailprice,1) over (partition by p_mfgr order by p_name) as l1, lead(p_retailprice,1, p_retailprice) over (partition by p_mfgr order by p_name) as l2, p_retailprice - lead(p_retailprice,1,p_retailprice) over (partition by p_mfgr order by p_name), diff --git a/ql/src/test/results/clientpositive/leftsemijoin.q.out b/ql/src/test/results/clientpositive/leftsemijoin.q.out index 11f0bb0..a11bbc4 100644 --- a/ql/src/test/results/clientpositive/leftsemijoin.q.out +++ b/ql/src/test/results/clientpositive/leftsemijoin.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table sales +PREHOOK: query: drop table sales PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table sales +POSTHOOK: query: drop table sales POSTHOOK: type: DROPTABLE PREHOOK: query: drop table things PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out b/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out index 07b9c6e..40ac1e6 100644 --- a/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out +++ b/ql/src/test/results/clientpositive/limit_partition_metadataonly.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain select ds from srcpart where hr=11 and ds='2008-04-08' +PREHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain select ds from srcpart where hr=11 and ds='2008-04-08' +POSTHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/limit_pushdown2.q.out b/ql/src/test/results/clientpositive/limit_pushdown2.q.out index b44b529..7b8a2b6 
100644 --- a/ql/src/test/results/clientpositive/limit_pushdown2.q.out +++ b/ql/src/test/results/clientpositive/limit_pushdown2.q.out @@ -562,14 +562,12 @@ POSTHOOK: Input: default@src 76 val_76 77.0 74 val_74 75.0 72 val_72 73.0 -PREHOOK: query: -- NOT APPLICABLE -explain +PREHOOK: query: explain select value, avg(key + 1) myavg from src group by value order by myavg, value desc limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- NOT APPLICABLE -explain +POSTHOOK: query: explain select value, avg(key + 1) myavg from src group by value order by myavg, value desc limit 20 @@ -679,14 +677,12 @@ val_27 28.0 val_28 29.0 val_30 31.0 val_33 34.0 -PREHOOK: query: -- NOT APPLICABLE -explain +PREHOOK: query: explain select key, value, avg(key + 1) from src group by value, key with rollup order by key, value limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- NOT APPLICABLE -explain +POSTHOOK: query: explain select key, value, avg(key + 1) from src group by value, key with rollup order by key, value limit 20 diff --git a/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out b/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out index b5e9809..a92656b 100644 --- a/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out +++ b/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- negative, RS + join -explain select * from src a join src b on a.key=b.key limit 20 +PREHOOK: query: explain select * from src a join src b on a.key=b.key limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- negative, RS + join -explain select * from src a join src b on a.key=b.key limit 20 +POSTHOOK: query: explain select * from src a join src b on a.key=b.key limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -70,11 +68,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- negative, RS + filter -explain select value, sum(key) as sum from src group by value having sum > 100 limit 20 +PREHOOK: 
query: explain select value, sum(key) as sum from src group by value having sum > 100 limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- negative, RS + filter -explain select value, sum(key) as sum from src group by value having sum > 100 limit 20 +POSTHOOK: query: explain select value, sum(key) as sum from src group by value having sum > 100 limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -130,11 +126,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- negative, RS + lateral view -explain select key, L.* from (select * from src order by key) a lateral view explode(array(value, value)) L as v limit 10 +PREHOOK: query: explain select key, L.* from (select * from src order by key) a lateral view explode(array(value, value)) L as v limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- negative, RS + lateral view -explain select key, L.* from (select * from src order by key) a lateral view explode(array(value, value)) L as v limit 10 +POSTHOOK: query: explain select key, L.* from (select * from src order by key) a lateral view explode(array(value, value)) L as v limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -215,13 +209,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- negative, RS + forward + multi-groupby -CREATE TABLE dest_2(key STRING, c1 INT) +PREHOOK: query: CREATE TABLE dest_2(key STRING, c1 INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_2 -POSTHOOK: query: -- negative, RS + forward + multi-groupby -CREATE TABLE dest_2(key STRING, c1 INT) +POSTHOOK: query: CREATE TABLE dest_2(key STRING, c1 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_2 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out index 3dbf618..376657a 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out +++ 
b/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out @@ -1,33 +1,21 @@ -PREHOOK: query: -- list bucketing DML : dynamic partition and 2 stage query plan. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: query: create table list_bucketing_dynamic_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- list bucketing DML : dynamic partition and 2 stage query plan. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +POSTHOOK: query: create table list_bucketing_dynamic_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_dynamic_part -PREHOOK: query: -- list bucketing DML -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -223,12 +211,10 @@ POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).ke POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).key SIMPLE 
[(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: -- check DML result -desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') +POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@list_bucketing_dynamic_part # col_name data_type comment @@ -428,13 +414,11 @@ POSTHOOK: Input: default@list_bucketing_dynamic_part POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=11 #### A masked pattern was here #### 484 val_484 -PREHOOK: query: -- clean up resources -drop table list_bucketing_dynamic_part +PREHOOK: query: drop table list_bucketing_dynamic_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_dynamic_part PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- clean up resources -drop table list_bucketing_dynamic_part +POSTHOOK: query: drop table list_bucketing_dynamic_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_dynamic_part POSTHOOK: Output: default@list_bucketing_dynamic_part diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out index 7d07b16..f358211 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_11.q.out @@ -1,11 +1,4 @@ -PREHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns 
- --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- list bucketing DML: static partition. multiple skewed columns. - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +PREHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (value) on ('val_466','val_287','val_82') stored as DIRECTORIES @@ -13,14 +6,7 @@ create table list_bucketing_static_part (key String, value String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- list bucketing DML: static partition. multiple skewed columns. - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +POSTHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (value) on ('val_466','val_287','val_82') stored as DIRECTORIES @@ -28,13 +14,11 @@ create table list_bucketing_static_part (key String, value String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_static_part -PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from src PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. 
-explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from src POSTHOOK: type: QUERY @@ -177,12 +161,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_static_part +PREHOOK: query: show partitions list_bucketing_static_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_static_part +POSTHOOK: query: show partitions list_bucketing_static_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_static_part ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out index 1f66fc3..32fd99b 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_12.q.out @@ -1,10 +1,4 @@ -PREHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- test where the skewed values are more than 1 say columns no. 
2 and 4 in a table with 5 columns -create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) +PREHOOK: query: create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) stored as DIRECTORIES @@ -12,13 +6,7 @@ create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_mul_col -POSTHOOK: query: -- Ensure it works if skewed column is not the first column in the table columns - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns -create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) +POSTHOOK: query: create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) stored as DIRECTORIES @@ -26,13 +14,11 @@ create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_mul_col -PREHOOK: query: -- list bucketing DML -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11') select 1, key, 1, value, 1 from src PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '11') select 1, key, 1, value, 1 from src POSTHOOK: type: QUERY @@ -178,12 +164,10 @@ POSTHOOK: Lineage: list_bucketing_mul_col 
PARTITION(ds=2008-04-08,hr=11).col2 SI POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col3 EXPRESSION [] POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=11).col5 EXPRESSION [] -PREHOOK: query: -- check DML result -show partitions list_bucketing_mul_col +PREHOOK: query: show partitions list_bucketing_mul_col PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_mul_col -POSTHOOK: query: -- check DML result -show partitions list_bucketing_mul_col +POSTHOOK: query: show partitions list_bucketing_mul_col POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_mul_col ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out index 2a1adb8..9cb318c 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_13.q.out @@ -1,10 +1,4 @@ -PREHOOK: query: -- Ensure skewed value map has escaped directory name - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- test where the skewed values are more than 1 say columns no. 
2 and 4 in a table with 5 columns -create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) +PREHOOK: query: create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) stored as DIRECTORIES @@ -12,13 +6,7 @@ create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_mul_col -POSTHOOK: query: -- Ensure skewed value map has escaped directory name - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- test where the skewed values are more than 1 say columns no. 2 and 4 in a table with 5 columns -create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) +POSTHOOK: query: create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 String, col5 string) partitioned by (ds String, hr String) skewed by (col2, col4) on (('466','val_466'),('287','val_287'),('82','val_82')) stored as DIRECTORIES @@ -26,13 +14,11 @@ create table list_bucketing_mul_col (col1 String, col2 String, col3 String, col4 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_mul_col -PREHOOK: query: -- list bucketing DML -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99') select 1, key, 1, value, 1 from src PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_mul_col partition (ds = '2008-04-08', hr = '2013-01-23+18:00:99') select 1, key, 1, value, 1 from src POSTHOOK: type: QUERY @@ -178,12 +164,10 @@ POSTHOOK: Lineage: list_bucketing_mul_col 
PARTITION(ds=2008-04-08,hr=2013-01-23+ POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col3 EXPRESSION [] POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_mul_col PARTITION(ds=2008-04-08,hr=2013-01-23+18:00:99).col5 EXPRESSION [] -PREHOOK: query: -- check DML result -show partitions list_bucketing_mul_col +PREHOOK: query: show partitions list_bucketing_mul_col PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_mul_col -POSTHOOK: query: -- check DML result -show partitions list_bucketing_mul_col +POSTHOOK: query: show partitions list_bucketing_mul_col POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_mul_col ds=2008-04-08/hr=2013-01-23+18%3A00%3A99 diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out index ebbbb26..c294bfb 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_14.q.out @@ -1,31 +1,19 @@ -PREHOOK: query: -- list bucketing DML : unpartitioned table and 2 stage query plan. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -create table list_bucketing (key String, value String) +PREHOOK: query: create table list_bucketing (key String, value String) skewed by (key) on ("484") stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing -POSTHOOK: query: -- list bucketing DML : unpartitioned table and 2 stage query plan. 
- --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -create table list_bucketing (key String, value String) +POSTHOOK: query: create table list_bucketing (key String, value String) skewed by (key) on ("484") stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing -PREHOOK: query: -- list bucketing DML -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing select * from src PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing select * from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -167,12 +155,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@list_bucketing POSTHOOK: Lineage: list_bucketing.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -desc formatted list_bucketing +PREHOOK: query: desc formatted list_bucketing PREHOOK: type: DESCTABLE PREHOOK: Input: default@list_bucketing -POSTHOOK: query: -- check DML result -desc formatted list_bucketing +POSTHOOK: query: desc formatted list_bucketing POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@list_bucketing # col_name data_type comment @@ -349,13 +335,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@list_bucketing #### A masked pattern was here #### 484 val_484 -PREHOOK: query: -- clean up resources -drop table list_bucketing +PREHOOK: query: drop table list_bucketing PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing PREHOOK: Output: default@list_bucketing -POSTHOOK: query: -- clean up resources -drop table list_bucketing +POSTHOOK: query: drop table list_bucketing POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing POSTHOOK: Output: default@list_bucketing diff --git 
a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out index 2dbcf39..b34a20e 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_2.q.out @@ -1,19 +1,4 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML: static partition. multiple skewed columns. --- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103/value=val_103: --- 99 000000_0 --- 99 000001_0 --- ds=2008-04-08/hr=11/key=484/value=val_484: --- 87 000000_0 --- 87 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +PREHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -21,22 +6,7 @@ create table list_bucketing_static_part (key String, value String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML: static partition. multiple skewed columns. 
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103/value=val_103: --- 99 000000_0 --- 99 000001_0 --- ds=2008-04-08/hr=11/key=484/value=val_484: --- 87 000000_0 --- 87 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +POSTHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -44,13 +14,11 @@ create table list_bucketing_static_part (key String, value String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_static_part -PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. 
-explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -246,12 +214,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_static_part +PREHOOK: query: show partitions list_bucketing_static_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_static_part +POSTHOOK: query: show partitions list_bucketing_static_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_static_part ds=2008-04-08/hr=11 @@ -417,17 +383,13 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none --- but query should succeed for 51 or 51 and val_14 -select * from srcpart where ds = '2008-04-08' and key = '51' +PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none --- but query should succeed for 51 or 51 and val_14 -select * from srcpart where ds = 
'2008-04-08' and key = '51' +POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -473,15 +435,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 #### A masked pattern was here #### -PREHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning -select count(1) from srcpart where ds = '2008-04-08' and key < '51' +PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key < '51' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning -select count(1) from srcpart where ds = '2008-04-08' and key < '51' +POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key < '51' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -571,13 +531,11 @@ POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 #### A masked pattern was here #### 90 -PREHOOK: query: -- clean up -drop table list_bucketing_static_part +PREHOOK: query: drop table list_bucketing_static_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_static_part PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- clean up -drop table list_bucketing_static_part +POSTHOOK: query: drop table list_bucketing_static_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Output: default@list_bucketing_static_part diff --git 
a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out index 049c57a..d832b53 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out @@ -1,27 +1,15 @@ -PREHOOK: query: -- list bucketing DML : static partition and 2 stage query plan. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES +PREHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- list bucketing DML : static partition and 2 stage query plan. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES +POSTHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ("484") stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_static_part -PREHOOK: query: -- list bucketing DML -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds='2008-04-08', hr='11') select key, value from srcpart where ds='2008-04-08' POSTHOOK: type: QUERY STAGE 
DEPENDENCIES: @@ -214,12 +202,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') +POSTHOOK: query: desc formatted list_bucketing_static_part partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@list_bucketing_static_part # col_name data_type comment @@ -375,13 +361,11 @@ POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 #### A masked pattern was here #### 484 val_484 484 val_484 -PREHOOK: query: -- clean up resources -drop table list_bucketing_static_part +PREHOOK: query: drop table list_bucketing_static_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_static_part PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- clean up resources -drop table list_bucketing_static_part +POSTHOOK: query: drop table list_bucketing_static_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Output: default@list_bucketing_static_part diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out index cbe3579..0f0320b 100644 --- 
a/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_4.q.out @@ -1,23 +1,4 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML: static partition. multiple skewed columns. merge. --- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103/value=val_103: --- 99 000000_0 --- 99 000001_0 --- after merge --- 142 000000_0 --- ds=2008-04-08/hr=11/key=484/value=val_484: --- 87 000000_0 --- 87 000001_0 --- after merge --- 118 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +PREHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -25,26 +6,7 @@ create table list_bucketing_static_part (key String, value String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML: static partition. multiple skewed columns. merge. 
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103/value=val_103: --- 99 000000_0 --- 99 000001_0 --- after merge --- 142 000000_0 --- ds=2008-04-08/hr=11/key=484/value=val_484: --- 87 000000_0 --- 87 000001_0 --- after merge --- 118 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +POSTHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -52,13 +14,11 @@ create table list_bucketing_static_part (key String, value String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_static_part -PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. 
-explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -254,12 +214,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_static_part +PREHOOK: query: show partitions list_bucketing_static_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_static_part +POSTHOOK: query: show partitions list_bucketing_static_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_static_part ds=2008-04-08/hr=11 @@ -308,13 +266,11 @@ Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103/value=val_103, [484, val_484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484/value=val_484} Storage Desc Params: serialization.format 1 -PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files. 
-explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -622,12 +578,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_static_part +PREHOOK: query: show partitions list_bucketing_static_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_static_part +POSTHOOK: query: show partitions list_bucketing_static_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_static_part ds=2008-04-08/hr=11 @@ -793,13 +747,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: -- clean up -drop table list_bucketing_static_part +PREHOOK: query: drop table list_bucketing_static_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_static_part PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- clean up -drop table list_bucketing_static_part +POSTHOOK: query: drop table list_bucketing_static_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Output: default@list_bucketing_static_part diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out index b46977d..e7a353f 100644 --- 
a/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_5.q.out @@ -1,35 +1,21 @@ -PREHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: query: create table list_bucketing_dynamic_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- list bucketing DML: multiple skewed columns. 2 stages - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +POSTHOOK: query: create table list_bucketing_dynamic_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_dynamic_part -PREHOOK: query: -- list bucketing DML -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds='2008-04-08', hr) select key, value, hr from srcpart where ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -225,12 +211,10 @@ POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=11).ke POSTHOOK: Lineage: list_bucketing_dynamic_part 
PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=12).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') +PREHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') PREHOOK: type: DESCTABLE PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: -- check DML result -desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') +POSTHOOK: query: desc formatted list_bucketing_dynamic_part partition (ds='2008-04-08', hr='11') POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@list_bucketing_dynamic_part # col_name data_type comment @@ -484,13 +468,11 @@ POSTHOOK: Input: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=12 103 val_103 2008-04-08 11 103 val_103 2008-04-08 12 103 val_103 2008-04-08 12 -PREHOOK: query: -- clean up resources -drop table list_bucketing_dynamic_part +PREHOOK: query: drop table list_bucketing_dynamic_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_dynamic_part PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- clean up resources -drop table list_bucketing_dynamic_part +POSTHOOK: query: drop table list_bucketing_dynamic_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_dynamic_part POSTHOOK: Output: default@list_bucketing_dynamic_part diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out index 3591239..d304e0b 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out 
+++ b/ql/src/test/results/clientpositive/list_bucket_dml_6.q.out @@ -1,46 +1,4 @@ -PREHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed columns. merge. --- The following explains merge example used in this test case --- DML will generated 2 partitions --- ds=2008-04-08/hr=a1 --- ds=2008-04-08/hr=b1 --- without merge, each partition has more files --- ds=2008-04-08/hr=a1 has 2 files --- ds=2008-04-08/hr=b1 has 6 files --- with merge each partition has more files --- ds=2008-04-08/hr=a1 has 1 files --- ds=2008-04-08/hr=b1 has 4 files --- The following shows file size and name in each directory --- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- without merge --- 155 000000_0 --- 155 000001_0 --- with merge --- 254 000000_0 --- hr=b1/key=103/value=val_103: --- without merge --- 99 000000_0 --- 99 000001_0 --- with merge --- 142 000001_0 --- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- without merge --- 5181 000000_0 --- 5181 000001_0 --- with merge --- 5181 000000_0 --- 5181 000001_0 --- hr=b1/key=484/value=val_484 --- without merge --- 87 000000_0 --- 87 000001_0 --- with merge --- 118 000002_0 - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: query: create table list_bucketing_dynamic_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -48,49 +6,7 @@ create table list_bucketing_dynamic_part (key String, value String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- list bucketing DML: dynamic partition. multiple skewed columns. merge. 
--- The following explains merge example used in this test case --- DML will generated 2 partitions --- ds=2008-04-08/hr=a1 --- ds=2008-04-08/hr=b1 --- without merge, each partition has more files --- ds=2008-04-08/hr=a1 has 2 files --- ds=2008-04-08/hr=b1 has 6 files --- with merge each partition has more files --- ds=2008-04-08/hr=a1 has 1 files --- ds=2008-04-08/hr=b1 has 4 files --- The following shows file size and name in each directory --- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- without merge --- 155 000000_0 --- 155 000001_0 --- with merge --- 254 000000_0 --- hr=b1/key=103/value=val_103: --- without merge --- 99 000000_0 --- 99 000001_0 --- with merge --- 142 000001_0 --- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- without merge --- 5181 000000_0 --- 5181 000001_0 --- with merge --- 5181 000000_0 --- 5181 000001_0 --- hr=b1/key=484/value=val_484 --- without merge --- 87 000000_0 --- 87 000001_0 --- with merge --- 118 000002_0 - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +POSTHOOK: query: create table list_bucketing_dynamic_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -98,13 +14,11 @@ create table list_bucketing_dynamic_part (key String, value String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_dynamic_part -PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. 
-explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -303,12 +217,10 @@ POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).ke POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +PREHOOK: query: show partitions list_bucketing_dynamic_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +POSTHOOK: query: show partitions list_bucketing_dynamic_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_dynamic_part ds=2008-04-08/hr=a1 @@ -401,13 +313,11 @@ Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484} Storage Desc Params: serialization.format 1 
-PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files. -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -724,12 +634,10 @@ POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).ke POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +PREHOOK: query: show partitions list_bucketing_dynamic_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +POSTHOOK: query: show partitions list_bucketing_dynamic_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_dynamic_part ds=2008-04-08/hr=a1 @@ -985,13 +893,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: -- clean up -drop table list_bucketing_dynamic_part +PREHOOK: 
query: drop table list_bucketing_dynamic_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_dynamic_part PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- clean up -drop table list_bucketing_dynamic_part +POSTHOOK: query: drop table list_bucketing_dynamic_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_dynamic_part POSTHOOK: Output: default@list_bucketing_dynamic_part diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out index ab8d0a8..6920b98 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out @@ -1,20 +1,4 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML : dynamic partition (one level) , merge , one skewed column --- DML without merge files mixed with small and big files: --- ds=2008-04-08/hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/ --- 155 000000_0 --- ds=2008-04-08/hr=b1/key=484 --- 87 000000_0 --- 87 000001_0 --- ds=2008-04-08/hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/ --- 5201 000000_0 --- 5201 000001_0 --- DML with merge will merge small files - --- skewed table -CREATE TABLE list_bucketing_dynamic_part (key String, value STRING) +PREHOOK: query: CREATE TABLE list_bucketing_dynamic_part (key String, value STRING) PARTITIONED BY (ds string, hr string) skewed by (key) on ('484') stored as DIRECTORIES @@ -22,23 +6,7 @@ CREATE TABLE list_bucketing_dynamic_part (key String, value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML : dynamic partition (one level) , merge , one skewed column --- DML without merge files mixed with small and big files: --- ds=2008-04-08/hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/ 
--- 155 000000_0 --- ds=2008-04-08/hr=b1/key=484 --- 87 000000_0 --- 87 000001_0 --- ds=2008-04-08/hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/ --- 5201 000000_0 --- 5201 000001_0 --- DML with merge will merge small files - --- skewed table -CREATE TABLE list_bucketing_dynamic_part (key String, value STRING) +POSTHOOK: query: CREATE TABLE list_bucketing_dynamic_part (key String, value STRING) PARTITIONED BY (ds string, hr string) skewed by (key) on ('484') stored as DIRECTORIES @@ -46,13 +14,11 @@ CREATE TABLE list_bucketing_dynamic_part (key String, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_dynamic_part -PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. 
-explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -251,12 +217,10 @@ POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).ke POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +PREHOOK: query: show partitions list_bucketing_dynamic_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +POSTHOOK: query: show partitions list_bucketing_dynamic_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_dynamic_part ds=2008-04-08/hr=a1 @@ -349,13 +313,11 @@ Skewed Values: [[484]] Skewed Value to Truncated Path: {[484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484} Storage Desc Params: serialization.format 1 -PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files. 
-explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -672,12 +634,10 @@ POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).ke POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +PREHOOK: query: show partitions list_bucketing_dynamic_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +POSTHOOK: query: show partitions list_bucketing_dynamic_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_dynamic_part ds=2008-04-08/hr=a1 @@ -933,13 +893,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: -- clean up -drop table list_bucketing_dynamic_part +PREHOOK: query: drop table list_bucketing_dynamic_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_dynamic_part PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- clean up -drop table list_bucketing_dynamic_part +POSTHOOK: query: drop table list_bucketing_dynamic_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_dynamic_part POSTHOOK: Output: default@list_bucketing_dynamic_part 
diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out index 2640f8d..c1b5e86 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_8.q.out @@ -1,47 +1,4 @@ -PREHOOK: query: -- list bucketing alter table ... concatenate: --- Use list bucketing DML to generate mutilple files in partitions by turning off merge --- dynamic partition. multiple skewed columns. merge. --- The following explains merge example used in this test case --- DML will generated 2 partitions --- ds=2008-04-08/hr=a1 --- ds=2008-04-08/hr=b1 --- without merge, each partition has more files --- ds=2008-04-08/hr=a1 has 2 files --- ds=2008-04-08/hr=b1 has 6 files --- with merge each partition has more files --- ds=2008-04-08/hr=a1 has 1 files --- ds=2008-04-08/hr=b1 has 4 files --- The following shows file size and name in each directory --- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- without merge --- 155 000000_0 --- 155 000001_0 --- with merge --- 254 000000_0 --- hr=b1/key=103/value=val_103: --- without merge --- 99 000000_0 --- 99 000001_0 --- with merge --- 142 000001_0 --- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- without merge --- 5181 000000_0 --- 5181 000001_0 --- with merge --- 5181 000000_0 --- 5181 000001_0 --- hr=b1/key=484/value=val_484 --- without merge --- 87 000000_0 --- 87 000001_0 --- with merge --- 118 000002_0 - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +PREHOOK: query: create table list_bucketing_dynamic_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -49,50 +6,7 @@ create table list_bucketing_dynamic_part (key String, value 
String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- list bucketing alter table ... concatenate: --- Use list bucketing DML to generate mutilple files in partitions by turning off merge --- dynamic partition. multiple skewed columns. merge. --- The following explains merge example used in this test case --- DML will generated 2 partitions --- ds=2008-04-08/hr=a1 --- ds=2008-04-08/hr=b1 --- without merge, each partition has more files --- ds=2008-04-08/hr=a1 has 2 files --- ds=2008-04-08/hr=b1 has 6 files --- with merge each partition has more files --- ds=2008-04-08/hr=a1 has 1 files --- ds=2008-04-08/hr=b1 has 4 files --- The following shows file size and name in each directory --- hr=a1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- without merge --- 155 000000_0 --- 155 000001_0 --- with merge --- 254 000000_0 --- hr=b1/key=103/value=val_103: --- without merge --- 99 000000_0 --- 99 000001_0 --- with merge --- 142 000001_0 --- hr=b1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- without merge --- 5181 000000_0 --- 5181 000001_0 --- with merge --- 5181 000000_0 --- 5181 000001_0 --- hr=b1/key=484/value=val_484 --- without merge --- 87 000000_0 --- 87 000001_0 --- with merge --- 118 000002_0 - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- create a skewed table -create table list_bucketing_dynamic_part (key String, value String) +POSTHOOK: query: create table list_bucketing_dynamic_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -100,13 +14,11 @@ create table list_bucketing_dynamic_part (key String, value String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_dynamic_part -PREHOOK: query: -- list bucketing DML without merge. 
use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_dynamic_part partition (ds = '2008-04-08', hr) select key, value, if(key % 100 == 0, 'a1', 'b1') from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -305,12 +217,10 @@ POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).ke POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=a1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_dynamic_part PARTITION(ds=2008-04-08,hr=b1).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +PREHOOK: query: show partitions list_bucketing_dynamic_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_dynamic_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_dynamic_part +POSTHOOK: query: show partitions list_bucketing_dynamic_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_dynamic_part ds=2008-04-08/hr=a1 @@ -403,13 +313,11 @@ Skewed Values: [[103, val_103], [484, val_484], [51, val_14]] Skewed Value to Truncated Path: {[103, val_103]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=103/value=val_103, [484, val_484]=/list_bucketing_dynamic_part/ds=2008-04-08/hr=b1/key=484/value=val_484} 
Storage Desc Params: serialization.format 1 -PREHOOK: query: -- concatenate the partition and it will merge files -alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate +PREHOOK: query: alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate PREHOOK: type: ALTER_PARTITION_MERGE PREHOOK: Input: default@list_bucketing_dynamic_part PREHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 -POSTHOOK: query: -- concatenate the partition and it will merge files -alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate +POSTHOOK: query: alter table list_bucketing_dynamic_part partition (ds='2008-04-08', hr='b1') concatenate POSTHOOK: type: ALTER_PARTITION_MERGE POSTHOOK: Input: default@list_bucketing_dynamic_part POSTHOOK: Output: default@list_bucketing_dynamic_part@ds=2008-04-08/hr=b1 @@ -623,13 +531,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: -- clean up -drop table list_bucketing_dynamic_part +PREHOOK: query: drop table list_bucketing_dynamic_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_dynamic_part PREHOOK: Output: default@list_bucketing_dynamic_part -POSTHOOK: query: -- clean up -drop table list_bucketing_dynamic_part +POSTHOOK: query: drop table list_bucketing_dynamic_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_dynamic_part POSTHOOK: Output: default@list_bucketing_dynamic_part diff --git a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out index 400f23b..5bf1125 100644 --- a/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_dml_9.q.out @@ -1,23 +1,4 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML: 
static partition. multiple skewed columns. merge. --- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103: --- 99 000000_0 --- 99 000001_0 --- after merge --- 142 000000_0 --- ds=2008-04-08/hr=11/key=484: --- 87 000000_0 --- 87 000001_0 --- after merge --- 118 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +PREHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ('484','103') stored as DIRECTORIES @@ -25,26 +6,7 @@ create table list_bucketing_static_part (key String, value String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML: static partition. multiple skewed columns. merge. --- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103: --- 99 000000_0 --- 99 000001_0 --- after merge --- 142 000000_0 --- ds=2008-04-08/hr=11/key=484: --- 87 000000_0 --- 87 000001_0 --- after merge --- 118 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +POSTHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ('484','103') stored as DIRECTORIES @@ -52,13 +14,11 @@ create table list_bucketing_static_part (key String, value String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_static_part -PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. 
-explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -254,12 +214,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_static_part +PREHOOK: query: show partitions list_bucketing_static_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_static_part +POSTHOOK: query: show partitions list_bucketing_static_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_static_part ds=2008-04-08/hr=11 @@ -308,13 +266,11 @@ Skewed Values: [[103], [484]] Skewed Value to Truncated Path: {[103]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=103, [484]=/list_bucketing_static_part/ds=2008-04-08/hr=11/key=484} Storage Desc Params: serialization.format 1 -PREHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files. 
-explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML with merge. use bucketize to generate a few small files. -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -622,12 +578,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_static_part +PREHOOK: query: show partitions list_bucketing_static_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_static_part +POSTHOOK: query: show partitions list_bucketing_static_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_static_part ds=2008-04-08/hr=11 @@ -793,13 +747,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: -- clean up -drop table list_bucketing_static_part +PREHOOK: query: drop table list_bucketing_static_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_static_part PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- clean up -drop table list_bucketing_static_part +POSTHOOK: query: drop table 
list_bucketing_static_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Output: default@list_bucketing_static_part diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out index e0af0c6..b44352f 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out @@ -1,39 +1,11 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- List bucketing query logic test case. We simulate the directory structure by DML here. --- Test condition: --- 1. where clause has multiple skewed columns --- 2. where clause doesn't have non-skewed column --- 3. where clause has one and operator --- Test focus: --- 1. basic list bucketing query work --- Test result: --- 1. pruner only pick up right directory --- 2. query result is right - --- create a skewed table -create table fact_daily (key String, value String) +PREHOOK: query: create table fact_daily (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- List bucketing query logic test case. We simulate the directory structure by DML here. --- Test condition: --- 1. where clause has multiple skewed columns --- 2. where clause doesn't have non-skewed column --- 3. where clause has one and operator --- Test focus: --- 1. basic list bucketing query work --- Test result: --- 1. pruner only pick up right directory --- 2. 
query result is right - --- create a skewed table -create table fact_daily (key String, value String) +POSTHOOK: query: create table fact_daily (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES @@ -106,13 +78,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily #### A masked pattern was here #### 500 -PREHOOK: query: -- pruner only pick up skewed-value directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (key='484' and value= 'val_484') +PREHOOK: query: explain extended SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (key='484' and value= 'val_484') PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up skewed-value directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (key='484' and value= 'val_484') +POSTHOOK: query: explain extended SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (key='484' and value= 'val_484') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -181,26 +149,20 @@ STAGE PLANS: Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (key='484' and value= 'val_484') +PREHOOK: query: SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (key='484' and value= 'val_484') PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (key='484' and value= 'val_484') +POSTHOOK: query: SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (key='484' and value= 'val_484') POSTHOOK: 
type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### 484 -PREHOOK: query: -- pruner only pick up skewed-value directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and (key='238' and value= 'val_238') +PREHOOK: query: explain extended SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and (key='238' and value= 'val_238') PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up skewed-value directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and (key='238' and value= 'val_238') +POSTHOOK: query: explain extended SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and (key='238' and value= 'val_238') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -269,27 +231,21 @@ STAGE PLANS: Statistics: Num rows: 125 Data size: 1328 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and (key='238' and value= 'val_238') +PREHOOK: query: SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and (key='238' and value= 'val_238') PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and (key='238' and value= 'val_238') +POSTHOOK: query: SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and (key='238' and value= 'val_238') POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### 238 val_238 238 val_238 -PREHOOK: query: -- pruner only pick up default directory --- explain plan 
shows which directory selected: Truncated Path -> Alias -explain extended SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (value = "3") +PREHOOK: query: explain extended SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (value = "3") PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up default directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (value = "3") +POSTHOOK: query: explain extended SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (value = "3") POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -358,25 +314,19 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (value = "3") +PREHOOK: query: SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (value = "3") PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (value = "3") +POSTHOOK: query: SELECT key FROM fact_daily WHERE ( ds='1' and hr='4') and (value = "3") POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### -PREHOOK: query: -- pruner only pick up default directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and key = '495' +PREHOOK: query: explain extended SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and key = '495' PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up default directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT 
key,value FROM fact_daily WHERE ( ds='1' and hr='4') and key = '495' +POSTHOOK: query: explain extended SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and key = '495' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -445,14 +395,12 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and key = '369' +PREHOOK: query: SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and key = '369' PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and key = '369' +POSTHOOK: query: SELECT key,value FROM fact_daily WHERE ( ds='1' and hr='4') and key = '369' POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1/hr=4 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out index dcc45d6..7fb7d80 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out @@ -1,39 +1,11 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- List bucketing query logic test case. We simulate the directory structure by DML here. --- Test condition: --- 1. where clause has multiple skewed columns and non-skewed columns --- 3. where clause has a few operators --- Test focus: --- 1. basic list bucketing query work --- Test result: --- 1. pruner only pick up right directory --- 2. 
query result is right - --- create a skewed table -create table fact_daily (key String, value String) +PREHOOK: query: create table fact_daily (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- List bucketing query logic test case. We simulate the directory structure by DML here. --- Test condition: --- 1. where clause has multiple skewed columns and non-skewed columns --- 3. where clause has a few operators --- Test focus: --- 1. basic list bucketing query work --- Test result: --- 1. pruner only pick up right directory --- 2. query result is right - --- create a skewed table -create table fact_daily (key String, value String) +POSTHOOK: query: create table fact_daily (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES @@ -106,13 +78,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily #### A masked pattern was here #### 500 -PREHOOK: query: -- pruner only pick up default directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' +PREHOOK: query: explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up default directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' +POSTHOOK: query: explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root 
stage @@ -181,26 +149,20 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' +PREHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' +POSTHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and value= 'val_484' POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### 484 val_484 -PREHOOK: query: -- pruner only pick up default directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406' +PREHOOK: query: explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406' PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up default directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406' +POSTHOOK: query: explain extended SELECT key FROM fact_daily WHERE ds='1' and hr='4' and key= '406' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -269,14 +231,12 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406' +PREHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406' PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: 
default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406' +POSTHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and key= '406' POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1/hr=4 @@ -285,13 +245,9 @@ POSTHOOK: Input: default@fact_daily@ds=1/hr=4 406 val_406 406 val_406 406 val_406 -PREHOOK: query: -- pruner only pick up skewed-value directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) +PREHOOK: query: explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up skewed-value directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) +POSTHOOK: query: explain extended SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -360,14 +316,12 @@ STAGE PLANS: Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) +PREHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: 
Input: default@fact_daily@ds=1/hr=4 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) +POSTHOOK: query: SELECT key, value FROM fact_daily WHERE ds='1' and hr='4' and ( (key='484' and value ='val_484') or (key='238' and value= 'val_238')) POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1/hr=4 @@ -375,13 +329,11 @@ POSTHOOK: Input: default@fact_daily@ds=1/hr=4 238 val_238 238 val_238 484 val_484 -PREHOOK: query: -- clean up -drop table fact_daily +PREHOOK: query: drop table fact_daily PREHOOK: type: DROPTABLE PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- clean up -drop table fact_daily +POSTHOOK: query: drop table fact_daily POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@fact_daily POSTHOOK: Output: default@fact_daily diff --git a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out index 7da4e7c..80ba085 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out @@ -1,51 +1,19 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- List bucketing query logic test case. We simulate the directory structure by DML here. --- Test condition: --- 1. where clause has multiple skewed columns and non-skewed columns --- 3. where clause has a few operators --- Test focus: --- 1. query works for on partition level. --- A table can mix up non-skewed partition and skewed partition --- Even for skewed partition, it can have different skewed information. --- Test result: --- 1. pruner only pick up right directory --- 2. 
query result is right - --- create a skewed table -create table fact_daily (key String, value String) +PREHOOK: query: create table fact_daily (key String, value String) partitioned by (ds String, hr String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - --- List bucketing query logic test case. We simulate the directory structure by DML here. --- Test condition: --- 1. where clause has multiple skewed columns and non-skewed columns --- 3. where clause has a few operators --- Test focus: --- 1. query works for on partition level. --- A table can mix up non-skewed partition and skewed partition --- Even for skewed partition, it can have different skewed information. --- Test result: --- 1. pruner only pick up right directory --- 2. query result is right - --- create a skewed table -create table fact_daily (key String, value String) +POSTHOOK: query: create table fact_daily (key String, value String) partitioned by (ds String, hr String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@fact_daily -PREHOOK: query: -- partition no skew -insert overwrite table fact_daily partition (ds = '1', hr = '1') +PREHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '1') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@fact_daily@ds=1/hr=1 -POSTHOOK: query: -- partition no skew -insert overwrite table fact_daily partition (ds = '1', hr = '1') +POSTHOOK: query: insert overwrite table fact_daily partition (ds = '1', hr = '1') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -92,13 +60,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- partition. 
skewed value is 484/238 -alter table fact_daily skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES +PREHOOK: query: alter table fact_daily skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- partition. skewed value is 484/238 -alter table fact_daily skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES +POSTHOOK: query: alter table fact_daily skewed by (key, value) on (('484','val_484'),('238','val_238')) stored as DIRECTORIES POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@fact_daily POSTHOOK: Output: default@fact_daily @@ -159,13 +125,11 @@ Skewed Values: [[238, val_238], [484, val_484]] Skewed Value to Truncated Path: {[238, val_238]=/fact_daily/ds=1/hr=2/key=238/value=val_238, [484, val_484]=/fact_daily/ds=1/hr=2/key=484/value=val_484} Storage Desc Params: serialization.format 1 -PREHOOK: query: -- another partition. skewed value is 327 -alter table fact_daily skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES +PREHOOK: query: alter table fact_daily skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- another partition. 
skewed value is 327 -alter table fact_daily skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES +POSTHOOK: query: alter table fact_daily skewed by (key, value) on (('327','val_327')) stored as DIRECTORIES POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@fact_daily POSTHOOK: Output: default@fact_daily @@ -226,12 +190,10 @@ Skewed Values: [[327, val_327]] Skewed Value to Truncated Path: {[327, val_327]=/fact_daily/ds=1/hr=3/key=327/value=val_327} Storage Desc Params: serialization.format 1 -PREHOOK: query: -- query non-skewed partition -explain extended +PREHOOK: query: explain extended select * from fact_daily where ds = '1' and hr='1' and key='145' PREHOOK: type: QUERY -POSTHOOK: query: -- query non-skewed partition -explain extended +POSTHOOK: query: explain extended select * from fact_daily where ds = '1' and hr='1' and key='145' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -337,12 +299,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily #### A masked pattern was here #### 500 -PREHOOK: query: -- query skewed partition -explain extended +PREHOOK: query: explain extended SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and value='val_484') PREHOOK: type: QUERY -POSTHOOK: query: -- query skewed partition -explain extended +POSTHOOK: query: explain extended SELECT * FROM fact_daily WHERE ds='1' and hr='2' and (key='484' and value='val_484') POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -423,12 +383,10 @@ POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1/hr=2 #### A masked pattern was here #### 484 val_484 1 2 -PREHOOK: query: -- query another skewed partition -explain extended +PREHOOK: query: explain extended SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and value='val_327') PREHOOK: type: QUERY -POSTHOOK: query: -- query another skewed partition -explain extended +POSTHOOK: query: explain extended SELECT * FROM fact_daily WHERE ds='1' and hr='3' and (key='327' and 
value='val_327') POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out index b415ba5..8f21705 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out @@ -1,38 +1,8 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- List bucketing query logic test case. --- Test condition: --- 1. where clause has only one skewed column --- 2. where clause doesn't have non-skewed column --- 3. where clause has one and operator --- Test result: --- 1. pruner only pick up right directory --- 2. query result is right - --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query --- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- List bucketing query logic test case. --- Test condition: --- 1. where clause has only one skewed column --- 2. where clause doesn't have non-skewed column --- 3. where clause has one and operator --- Test result: --- 1. pruner only pick up right directory --- 2. 
query result is right - --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query --- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@fact_daily @@ -48,40 +18,34 @@ POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default POSTHOOK: Output: default@fact_tz -PREHOOK: query: -- create /fact_tz/ds=1/hr=1 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') SELECT key FROM src WHERE key=484 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@fact_tz@ds=1/hr=1 -POSTHOOK: query: -- create /fact_tz/ds=1/hr=1 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') SELECT key FROM src WHERE key=484 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@fact_tz@ds=1/hr=1 POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- create /fact_tz/ds=1/hr=2 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') SELECT key+11 FROM src WHERE key=484 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@fact_tz@ds=1/hr=2 -POSTHOOK: query: -- create /fact_tz/ds=1/hr=2 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') SELECT key+11 FROM src WHERE key=484 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@fact_tz@ds=1/hr=2 POSTHOOK: Lineage: 
fact_tz PARTITION(ds=1,hr=2).x EXPRESSION [] #### A masked pattern was here #### -PREHOOK: query: -- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484) +PREHOOK: query: alter table fact_daily skewed by (x) on (484) PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484) +POSTHOOK: query: alter table fact_daily skewed by (x) on (484) POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@fact_daily POSTHOOK: Output: default@fact_daily @@ -104,14 +68,11 @@ POSTHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### POSTHOOK: Output: default@fact_daily POSTHOOK: Output: default@fact_daily@ds=1 -PREHOOK: query: -- set List Bucketing location map #### A masked pattern was here #### PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- set List Bucketing location map -#### A masked pattern was here #### POSTHOOK: type: ALTERTBLPART_SKEWED_LOCATION POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 @@ -168,13 +129,9 @@ POSTHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### 484 1 495 1 -PREHOOK: query: -- pruner only pick up skewed-value directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and x=484 +PREHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=484 PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up skewed-value directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and x=484 +POSTHOOK: query: explain extended SELECT x FROM 
fact_daily WHERE ds='1' and x=484 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -240,26 +197,20 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT x FROM fact_daily WHERE ds='1' and x=484 +PREHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=484 PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT x FROM fact_daily WHERE ds='1' and x=484 +POSTHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=484 POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### 484 -PREHOOK: query: -- pruner only pick up default directory since x equal to non-skewed value --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and x=495 +PREHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=495 PREHOOK: type: QUERY -POSTHOOK: query: -- pruner only pick up default directory since x equal to non-skewed value --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and x=495 +POSTHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and x=495 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -325,14 +276,12 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT x FROM fact_daily WHERE ds='1' and x=495 +PREHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=495 PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT x FROM fact_daily 
WHERE ds='1' and x=495 +POSTHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and x=495 POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out index 45f2481..c7b879c 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out @@ -1,40 +1,8 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- List bucketing query logic test case. --- Test condition: --- 1. where clause has only one skewed column --- 2. where clause doesn't have non-skewed column --- Test focus: --- 1. list bucketing query logic works fine for subquery --- Test result: --- 1. pruner only pick up right directory --- 2. query result is right - --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query --- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int, y STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE fact_daily(x int, y STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- List bucketing query logic test case. --- Test condition: --- 1. where clause has only one skewed column --- 2. where clause doesn't have non-skewed column --- Test focus: --- 1. list bucketing query logic works fine for subquery --- Test result: --- 1. pruner only pick up right directory --- 2. 
query result is right - --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query --- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int, y STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE fact_daily(x int, y STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@fact_daily @@ -50,28 +18,24 @@ POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default POSTHOOK: Output: default@fact_tz -PREHOOK: query: -- create /fact_tz/ds=1/hr=1 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') SELECT key, value FROM src WHERE key=484 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@fact_tz@ds=1/hr=1 -POSTHOOK: query: -- create /fact_tz/ds=1/hr=1 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') SELECT key, value FROM src WHERE key=484 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@fact_tz@ds=1/hr=1 POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- create /fact_tz/ds=1/hr=2 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') SELECT key+11, value FROM src WHERE key=484 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@fact_tz@ds=1/hr=2 -POSTHOOK: query: -- create /fact_tz/ds=1/hr=2 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION 
(ds='1', hr='2') SELECT key+11, value FROM src WHERE key=484 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -79,13 +43,11 @@ POSTHOOK: Output: default@fact_tz@ds=1/hr=2 POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).x EXPRESSION [] POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] #### A masked pattern was here #### -PREHOOK: query: -- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484) +PREHOOK: query: alter table fact_daily skewed by (x) on (484) PREHOOK: type: ALTERTABLE_SKEWED PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484) +POSTHOOK: query: alter table fact_daily skewed by (x) on (484) POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@fact_daily POSTHOOK: Output: default@fact_daily @@ -108,14 +70,11 @@ POSTHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### POSTHOOK: Output: default@fact_daily POSTHOOK: Output: default@fact_daily@ds=1 -PREHOOK: query: -- set List Bucketing location map #### A masked pattern was here #### PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- set List Bucketing location map -#### A masked pattern was here #### POSTHOOK: type: ALTERTBLPART_SKEWED_LOCATION POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 @@ -173,13 +132,9 @@ POSTHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### 484 val_484 1 495 val_484 1 -PREHOOK: query: -- The first subquery --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select x from (select x from fact_daily where ds = '1') subq where x = 484 +PREHOOK: query: 
explain extended select x from (select x from fact_daily where ds = '1') subq where x = 484 PREHOOK: type: QUERY -POSTHOOK: query: -- The first subquery --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select x from (select x from fact_daily where ds = '1') subq where x = 484 +POSTHOOK: query: explain extended select x from (select x from fact_daily where ds = '1') subq where x = 484 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -245,26 +200,20 @@ STAGE PLANS: Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -select x from (select * from fact_daily where ds = '1') subq where x = 484 +PREHOOK: query: select x from (select * from fact_daily where ds = '1') subq where x = 484 PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -select x from (select * from fact_daily where ds = '1') subq where x = 484 +POSTHOOK: query: select x from (select * from fact_daily where ds = '1') subq where x = 484 POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### 484 -PREHOOK: query: -- The second subquery --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 +PREHOOK: query: explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 PREHOOK: type: QUERY -POSTHOOK: query: -- The second subquery --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 +POSTHOOK: query: explain extended select x1, y1 from(select x as x1, y as y1 from 
fact_daily where ds ='1') subq where x1 = 484 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -330,26 +279,20 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 +PREHOOK: query: select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 +POSTHOOK: query: select x1, y1 from(select x as x1, y as y1 from fact_daily where ds ='1') subq where x1 = 484 POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### 484 val_484 -PREHOOK: query: -- The third subquery --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select y, count(1) from fact_daily where ds ='1' and x = 484 group by y +PREHOOK: query: explain extended select y, count(1) from fact_daily where ds ='1' and x = 484 group by y PREHOOK: type: QUERY -POSTHOOK: query: -- The third subquery --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select y, count(1) from fact_daily where ds ='1' and x = 484 group by y +POSTHOOK: query: explain extended select y, count(1) from fact_daily where ds ='1' and x = 484 group by y POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -471,26 +414,20 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- List Bucketing Query -select y, count(1) from fact_daily where ds ='1' and x = 484 group by y +PREHOOK: query: select y, count(1) from fact_daily where ds ='1' and x = 484 group by y PREHOOK: type: QUERY 
PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -select y, count(1) from fact_daily where ds ='1' and x = 484 group by y +POSTHOOK: query: select y, count(1) from fact_daily where ds ='1' and x = 484 group by y POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### val_484 1 -PREHOOK: query: -- The fourth subquery --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484 +PREHOOK: query: explain extended select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484 PREHOOK: type: QUERY -POSTHOOK: query: -- The fourth subquery --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484 +POSTHOOK: query: explain extended select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -614,14 +551,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- List Bucketing Query -select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484 +PREHOOK: query: select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484 PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -select x, c from (select x, count(1) as c from fact_daily where ds = '1' group by x) subq where x = 484 +POSTHOOK: query: select x, c from (select x, count(1) as c from fact_daily where ds = '1' group 
by x) subq where x = 484 POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 diff --git a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out index 702bc93..d0e40de 100644 --- a/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out +++ b/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out @@ -1,40 +1,8 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- List bucketing query logic test case. --- Test condition: --- 1. where clause has single skewed columns and non-skewed columns --- 3. where clause has a few operators --- Test focus: --- 1. basic list bucketing query works for not (equal) case --- Test result: --- 1. pruner only pick up right directory --- 2. query result is right - --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query --- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int, y STRING, z STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE fact_daily(x int, y STRING, z STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- List bucketing query logic test case. --- Test condition: --- 1. where clause has single skewed columns and non-skewed columns --- 3. where clause has a few operators --- Test focus: --- 1. basic list bucketing query works for not (equal) case --- Test result: --- 1. pruner only pick up right directory --- 2. 
query result is right - --- create 2 tables: fact_daily and fact_tz --- fact_daily will be used for list bucketing query --- fact_tz is a table used to prepare data and test directories -CREATE TABLE fact_daily(x int, y STRING, z STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE fact_daily(x int, y STRING, z STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@fact_daily @@ -50,14 +18,12 @@ POSTHOOK: type: CREATETABLE #### A masked pattern was here #### POSTHOOK: Output: database:default POSTHOOK: Output: default@fact_tz -PREHOOK: query: -- create /fact_tz/ds=1/hr=1 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') SELECT key, value, value FROM src WHERE key=484 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@fact_tz@ds=1/hr=1 -POSTHOOK: query: -- create /fact_tz/ds=1/hr=1 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='1') SELECT key, value, value FROM src WHERE key=484 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -65,14 +31,12 @@ POSTHOOK: Output: default@fact_tz@ds=1/hr=1 POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=1).z SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- create /fact_tz/ds=1/hr=2 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') SELECT key, value, value FROM src WHERE key=278 or key=86 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: 
default@fact_tz@ds=1/hr=2 -POSTHOOK: query: -- create /fact_tz/ds=1/hr=2 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='2') SELECT key, value, value FROM src WHERE key=278 or key=86 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -80,14 +44,12 @@ POSTHOOK: Output: default@fact_tz@ds=1/hr=2 POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).x EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=2).z SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- create /fact_tz/ds=1/hr=3 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='3') +PREHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='3') SELECT key, value, value FROM src WHERE key=238 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@fact_tz@ds=1/hr=3 -POSTHOOK: query: -- create /fact_tz/ds=1/hr=3 directory -INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='3') +POSTHOOK: query: INSERT OVERWRITE TABLE fact_tz PARTITION (ds='1', hr='3') SELECT key, value, value FROM src WHERE key=238 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -96,13 +58,11 @@ POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=3).x EXPRESSION [(src)src.FieldSche POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=3).y SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: fact_tz PARTITION(ds=1,hr=3).z SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] #### A masked pattern was here #### -PREHOOK: query: -- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484,238) +PREHOOK: query: alter table fact_daily skewed by (x) on (484,238) PREHOOK: type: 
ALTERTABLE_SKEWED PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- switch fact_daily to skewed table and point its location to /fact_tz/ds=1 -alter table fact_daily skewed by (x) on (484,238) +POSTHOOK: query: alter table fact_daily skewed by (x) on (484,238) POSTHOOK: type: ALTERTABLE_SKEWED POSTHOOK: Input: default@fact_daily POSTHOOK: Output: default@fact_daily @@ -125,14 +85,11 @@ POSTHOOK: type: ALTERTABLE_ADDPARTS #### A masked pattern was here #### POSTHOOK: Output: default@fact_daily POSTHOOK: Output: default@fact_daily@ds=1 -PREHOOK: query: -- set List Bucketing location map #### A masked pattern was here #### PREHOOK: type: ALTERTBLPART_SKEWED_LOCATION PREHOOK: Input: default@fact_daily PREHOOK: Output: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- set List Bucketing location map -#### A masked pattern was here #### POSTHOOK: type: ALTERTBLPART_SKEWED_LOCATION POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 @@ -195,13 +152,9 @@ POSTHOOK: Input: default@fact_daily@ds=1 278 val_278 val_278 1 484 val_484 val_484 1 86 val_86 val_86 1 -PREHOOK: query: -- pruner pick up right directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) +PREHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) PREHOOK: type: QUERY -POSTHOOK: query: -- pruner pick up right directory --- explain plan shows which directory selected: Truncated Path -> Alias -explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) +POSTHOOK: query: explain extended SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -267,14 +220,12 @@ STAGE PLANS: Statistics: Num rows: 29 Data size: 117 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- List Bucketing Query -SELECT x 
FROM fact_daily WHERE ds='1' and not (x = 86) +PREHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) PREHOOK: type: QUERY PREHOOK: Input: default@fact_daily PREHOOK: Input: default@fact_daily@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- List Bucketing Query -SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) +POSTHOOK: query: SELECT x FROM fact_daily WHERE ds='1' and not (x = 86) POSTHOOK: type: QUERY POSTHOOK: Input: default@fact_daily POSTHOOK: Input: default@fact_daily@ds=1 diff --git a/ql/src/test/results/clientpositive/llap/acid_globallimit.q.out b/ql/src/test/results/clientpositive/llap/acid_globallimit.q.out index 12545c7..aa1914c 100644 --- a/ql/src/test/results/clientpositive/llap/acid_globallimit.q.out +++ b/ql/src/test/results/clientpositive/llap/acid_globallimit.q.out @@ -1,13 +1,11 @@ -PREHOOK: query: -- Global Limit optimization does not work with ACID table. Make sure to skip it for ACID table. -CREATE TABLE acidtest1(c1 INT, c2 STRING) +PREHOOK: query: CREATE TABLE acidtest1(c1 INT, c2 STRING) CLUSTERED BY (c1) INTO 3 BUCKETS STORED AS ORC TBLPROPERTIES ("transactional"="true") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@acidtest1 -POSTHOOK: query: -- Global Limit optimization does not work with ACID table. Make sure to skip it for ACID table. 
-CREATE TABLE acidtest1(c1 INT, c2 STRING) +POSTHOOK: query: CREATE TABLE acidtest1(c1 INT, c2 STRING) CLUSTERED BY (c1) INTO 3 BUCKETS STORED AS ORC TBLPROPERTIES ("transactional"="true") diff --git a/ql/src/test/results/clientpositive/llap/authorization_2.q.out b/ql/src/test/results/clientpositive/llap/authorization_2.q.out index 449cd02..1fb32fc 100644 --- a/ql/src/test/results/clientpositive/llap/authorization_2.q.out +++ b/ql/src/test/results/clientpositive/llap/authorization_2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -create table authorization_part (key int, value string) partitioned by (ds string) +PREHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@authorization_part -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table authorization_part (key int, value string) partitioned by (ds string) +POSTHOOK: query: create table authorization_part (key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@authorization_part @@ -30,12 +26,10 @@ POSTHOOK: query: ALTER TABLE authorization_part SET TBLPROPERTIES ("PARTITION_LE POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: default@authorization_part POSTHOOK: Output: default@authorization_part -PREHOOK: query: -- column grant to user -grant Create on authorization_part to user hive_test_user +PREHOOK: query: grant Create on authorization_part to user hive_test_user PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@authorization_part -POSTHOOK: query: -- column grant to user -grant Create on authorization_part to user hive_test_user +POSTHOOK: query: grant Create on authorization_part to user hive_test_user POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@authorization_part PREHOOK: query: grant Update on table authorization_part to user hive_test_user @@ -204,11 
+198,9 @@ POSTHOOK: query: alter table authorization_part drop partition (ds='2010') POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@authorization_part POSTHOOK: Output: default@authorization_part@ds=2010 -PREHOOK: query: -- table grant to user -show grant user hive_test_user on table authorization_part +PREHOOK: query: show grant user hive_test_user on table authorization_part PREHOOK: type: SHOW_GRANT -POSTHOOK: query: -- table grant to user -show grant user hive_test_user on table authorization_part +POSTHOOK: query: show grant user hive_test_user on table authorization_part POSTHOOK: type: SHOW_GRANT default authorization_part hive_test_user USER CREATE false -1 hive_test_user default authorization_part hive_test_user USER DROP false -1 hive_test_user @@ -369,13 +361,9 @@ POSTHOOK: query: alter table authorization_part drop partition (ds='2010') POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@authorization_part POSTHOOK: Output: default@authorization_part@ds=2010 -PREHOOK: query: -- column grant to group - -show grant group hive_test_group1 on table authorization_part +PREHOOK: query: show grant group hive_test_group1 on table authorization_part PREHOOK: type: SHOW_GRANT -POSTHOOK: query: -- column grant to group - -show grant group hive_test_group1 on table authorization_part +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part POSTHOOK: type: SHOW_GRANT PREHOOK: query: alter table authorization_part add partition (ds='2010') PREHOOK: type: ALTERTABLE_ADDPARTS @@ -515,11 +503,9 @@ POSTHOOK: query: alter table authorization_part drop partition (ds='2010') POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@authorization_part POSTHOOK: Output: default@authorization_part@ds=2010 -PREHOOK: query: -- table grant to group -show grant group hive_test_group1 on table authorization_part +PREHOOK: query: show grant group hive_test_group1 on table authorization_part PREHOOK: type: SHOW_GRANT -POSTHOOK: 
query: -- table grant to group -show grant group hive_test_group1 on table authorization_part +POSTHOOK: query: show grant group hive_test_group1 on table authorization_part POSTHOOK: type: SHOW_GRANT PREHOOK: query: alter table authorization_part add partition (ds='2010') PREHOOK: type: ALTERTABLE_ADDPARTS diff --git a/ql/src/test/results/clientpositive/llap/auto_join0.q.out b/ql/src/test/results/clientpositive/llap/auto_join0.q.out index 3dda24a..72e4e27 100644 --- a/ql/src/test/results/clientpositive/llap/auto_join0.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_join0.q.out @@ -1,7 +1,5 @@ Warning: Map Join MAPJOIN[24][bigTable=?] in task 'Reducer 2' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select sum(hash(a.k1,a.v1,a.k2, a.v2)) from ( SELECT src1.key as k1, src1.value as v1, @@ -12,9 +10,7 @@ SELECT src1.key as k1, src1.value as v1, SORT BY k1, v1, k2, v2 ) a PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select sum(hash(a.k1,a.v1,a.k2, a.v2)) from ( SELECT src1.key as k1, src1.value as v1, diff --git a/ql/src/test/results/clientpositive/llap/auto_join1.q.out b/ql/src/test/results/clientpositive/llap/auto_join1.q.out index 3624062..6a0a1d5 100644 --- a/ql/src/test/results/clientpositive/llap/auto_join1.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_join1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git 
a/ql/src/test/results/clientpositive/llap/auto_join21.q.out b/ql/src/test/results/clientpositive/llap/auto_join21.q.out index 91ea004..97b36f7 100644 --- a/ql/src/test/results/clientpositive/llap/auto_join21.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_join21.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/llap/auto_join29.q.out b/ql/src/test/results/clientpositive/llap/auto_join29.q.out index 94134c5..a3cc39d 100644 --- a/ql/src/test/results/clientpositive/llap/auto_join29.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_join29.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value 
POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out index e999077..224d39a 100644 --- a/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_smb_mapjoin_14.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 @@ -42,14 +38,12 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. 
It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -143,8 +137,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select key, count(*) from @@ -154,8 +147,7 @@ select count(*) from group by key ) subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select key, count(*) from @@ -286,9 +278,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 6 -PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -305,9 +295,7 @@ join ) src2 on src1.key = src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -529,18 +517,14 @@ POSTHOOK: Input: default@tbl2 5 9 9 8 1 1 9 1 1 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. 
-explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -640,9 +624,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -654,9 +636,7 @@ select count(*) from join tbl2 b on subq2.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -771,9 +751,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -792,9 +770,7 @@ select count(*) from ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. 
--- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -928,20 +904,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. 
-explain +POSTHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join @@ -1041,18 +1011,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed -explain +PREHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed -explain +POSTHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join @@ -1170,16 +1136,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- One of the tables is a sub-query and the other is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is a sub-query and the other is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -1273,9 +1235,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
--- It should be converted to to a sort-merge join -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1285,9 +1245,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1409,9 +1367,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 56 -PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1424,9 +1380,7 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1560,18 +1514,14 @@ POSTHOOK: query: CREATE TABLE dest2(key int, val1 string, val2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- The join is followed by a multi-table insert. 
It should be converted to --- a sort-merge join -explain +PREHOOK: query: explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 insert overwrite table dest1 select key, val1 insert overwrite table dest2 select key, val1, val2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is followed by a multi-table insert. It should be converted to --- a sort-merge join -explain +POSTHOOK: query: explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -1770,18 +1720,14 @@ POSTHOOK: query: CREATE TABLE dest2(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- The join is followed by a multi-table insert, and one of the inserts involves a reducer. --- It should be converted to a sort-merge join -explain +PREHOOK: query: explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 insert overwrite table dest1 select key, val1 insert overwrite table dest2 select key, count(*) group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is followed by a multi-table insert, and one of the inserts involves a reducer. 
--- It should be converted to a sort-merge join -explain +POSTHOOK: query: explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out index 9a586fa..887d0b8 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_1.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -103,11 +99,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter 
-explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out index 4a049c7..21ddd9b 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out @@ -38,8 +38,7 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -51,8 +50,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -219,16 +217,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 40 -PREHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. 
-explain +PREHOOK: query: explain select count(*) from (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 join diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out index 5cb3db5..bc23d62 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_11.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -385,17 +381,9 @@ POSTHOOK: Input: default@bucket_small POSTHOOK: Input: default@bucket_small@ds=2008-04-08 #### A masked pattern was here #### 38 -PREHOOK: query: -- Since size is being used to find the big 
table, the order of the tables in the join does not matter --- The tables are only bucketed and not sorted, the join should not be converted --- Currenly, a join is only converted to a sort-merge join without a hint, automatic conversion to --- bucketized mapjoin is not done -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter --- The tables are only bucketed and not sorted, the join should not be converted --- Currenly, a join is only converted to a sort-merge join without a hint, automatic conversion to --- bucketized mapjoin is not done -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -677,11 +665,9 @@ POSTHOOK: Input: default@bucket_small POSTHOOK: Input: default@bucket_small@ds=2008-04-08 #### A masked pattern was here #### 38 -PREHOOK: query: -- The join is converted to a bucketed mapjoin with a mapjoin hint -explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is converted to a bucketed mapjoin with a mapjoin hint -explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select /*+ mapjoin(a) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -955,11 +941,9 @@ POSTHOOK: Input: 
default@bucket_small POSTHOOK: Input: default@bucket_small@ds=2008-04-08 #### A masked pattern was here #### 38 -PREHOOK: query: -- HIVE-7023 -explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key +PREHOOK: query: explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-7023 -explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key +POSTHOOK: query: explain extended select /* + MAPJOIN(a,b) */ count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key JOIN bucket_big c ON a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out index 1bef238..c6a4220 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_12.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out index a539e03..4ac2369 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_13.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 @@ -54,8 +50,7 @@ POSTHOOK: query: CREATE TABLE dest2(k1 string, k2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- A SMB join followed by a mutli-insert -explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -63,8 +58,7 @@ from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -268,8 +262,7 @@ val_5 val_5 val_5 val_5 val_8 val_8 val_9 val_9 -PREHOOK: query: -- A SMB join followed by a mutli-insert -explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -277,8 +270,7 @@ 
from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -482,8 +474,7 @@ val_5 val_5 val_5 val_5 val_8 val_8 val_9 val_9 -PREHOOK: query: -- A SMB join followed by a mutli-insert -explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -491,8 +482,7 @@ from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_14.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_14.q.out index 9bab958..2da3d33 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_14.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_14.q.out @@ -34,12 +34,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed -explain +PREHOOK: query: explain select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed -explain +POSTHOOK: query: explain select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -143,12 +141,10 
@@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed -explain +PREHOOK: query: explain select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed -explain +POSTHOOK: query: explain select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_16.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_16.q.out index d4ecb19..cb8564f 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_16.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_16.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE stage_bucket_big +PREHOOK: query: CREATE TABLE stage_bucket_big ( key BIGINT, value STRING @@ -9,9 +7,7 @@ PARTITIONED BY (file_tag STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@stage_bucket_big -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE stage_bucket_big +POSTHOOK: query: CREATE TABLE stage_bucket_big ( key BIGINT, value STRING diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out index 0b1d648..a26a473 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) 
partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -83,11 +81,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the leftmost table is assumed as the big table, arrange the tables in the join accordingly -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -372,11 +368,9 @@ POSTHOOK: Input: default@bucket_small POSTHOOK: Input: default@bucket_small@ds=2008-04-08 #### A masked pattern was here #### 38 -PREHOOK: query: -- The mapjoin should fail resulting in the sort-merge join 
-explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out index d99d425..b38c036 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_3.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -83,11 +81,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.t POSTHOOK: type: LOAD #### A 
masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out index 069f08d..1bf95ba 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_4.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS 
TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -99,11 +97,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out index 5e45f9c..ac8a75a 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_5.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket - --- SORT_QUERY_RESULTS - -CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket - --- SORT_QUERY_RESULTS - -CREATE TABLE bucket_small (key string, value string) CLUSTERED 
BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -70,11 +62,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_6.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_6.q.out index 9e83db1..7607b08 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_6.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_6.q.out @@ -70,23 +70,9 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl4 POSTHOOK: Lineage: tbl4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key - --- Three tests below are all the same query with different 
alias, which changes dispatch order of GenMapRedWalker --- This is dependent to iteration order of HashMap, so can be meaningless in non-sun jdk --- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] --- c = TS[1]-RS[7]-JOIN[8] --- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key - --- Three tests below are all the same query with different alias, which changes dispatch order of GenMapRedWalker --- This is dependent to iteration order of HashMap, so can be meaningless in non-sun jdk --- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] --- c = TS[1]-RS[7]-JOIN[8] --- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -211,15 +197,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- d = TS[0]-RS[7]-JOIN[8]-SEL[9]-FS[10] --- b = TS[1]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8] --- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- d = TS[0]-RS[7]-JOIN[8]-SEL[9]-FS[10] --- b = TS[1]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8] --- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = 
b.key join src d on d.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -344,15 +324,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] --- a = TS[1]-MAPJOIN[11] --- h = TS[2]-RS[7]-JOIN[8] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] --- a = TS[1]-MAPJOIN[11] --- h = TS[2]-RS[7]-JOIN[8] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -477,11 +451,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -606,11 +578,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being 
followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -712,11 +682,9 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl3 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -841,11 +809,9 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl4 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value PREHOOK: type: QUERY -POSTHOOK: 
query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -970,11 +936,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1099,11 +1063,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key POSTHOOK: type: QUERY 
STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1205,11 +1167,9 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl3 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out index 3e4f408..93e9658 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_7.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, 
value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -116,11 +114,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out index dae32a5..dadb99b 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_8.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 
part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -116,11 +114,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -792,11 +788,9 @@ POSTHOOK: Input: default@bucket_small@ds=2008-04-08 POSTHOOK: Input: default@bucket_small@ds=2008-04-09 #### A masked pattern was here #### 76 -PREHOOK: query: -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key 
= b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out index 9549778..bbb2487 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 @@ -42,14 +38,12 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. 
It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -155,16 +149,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select key, count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select key, count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key @@ -286,8 +278,7 @@ POSTHOOK: Input: default@tbl2 5 9 8 1 9 1 -PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select key, count(*) from @@ -297,8 +288,7 @@ select count(*) from group by key ) subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select key, count(*) from @@ -441,9 +431,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 6 -PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. 
-explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -460,9 +448,7 @@ join ) src2 on src1.key = src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -700,18 +686,14 @@ POSTHOOK: Input: default@tbl2 5 9 9 8 1 1 9 1 1 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -823,9 +805,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -837,9 +817,7 @@ select count(*) from join tbl2 b on subq2.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. 
Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -966,9 +944,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -987,9 +963,7 @@ select count(*) from ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -1135,20 +1109,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. 
-explain +POSTHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join @@ -1260,18 +1228,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized mapside --- join should be performed -explain +PREHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized mapside --- join should be performed -explain +POSTHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join @@ -1383,16 +1347,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The left table is a sub-query and the right table is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- The left table is a sub-query and the right table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -1498,17 +1458,13 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The right table is a sub-query and the left table is not. --- It should be converted to a sort-merge join. 
-explain +PREHOOK: query: explain select count(*) from tbl1 a join (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 on a.key = subq1.key PREHOOK: type: QUERY -POSTHOOK: query: -- The right table is a sub-query and the left table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from tbl1 a join (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 @@ -1617,9 +1573,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1629,9 +1583,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1774,9 +1726,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 56 -PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1789,9 +1739,7 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. 
--- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1921,14 +1869,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -2034,16 +1980,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select key, count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select key, count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key @@ -2165,8 +2109,7 @@ POSTHOOK: Input: default@tbl2 5 9 8 1 9 1 -PREHOOK: query: -- The join is being performed as part of more than one sub-query. 
It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select key, count(*) from @@ -2176,8 +2119,7 @@ select count(*) from group by key ) subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select key, count(*) from @@ -2320,9 +2262,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 6 -PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -2339,9 +2279,7 @@ join ) src2 on src1.key = src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -2579,18 +2517,14 @@ POSTHOOK: Input: default@tbl2 5 9 9 8 1 1 9 1 1 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. 
-explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -2702,9 +2636,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -2716,9 +2648,7 @@ select count(*) from join tbl2 b on subq2.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -2845,9 +2775,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -2866,9 +2794,7 @@ select count(*) from ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -3014,20 +2940,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. 
-explain +PREHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join @@ -3139,16 +3059,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The left table is a sub-query and the right table is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- The left table is a sub-query and the right table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -3254,17 +3170,13 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The right table is a sub-query and the left table is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from tbl1 a join (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 on a.key = subq1.key PREHOOK: type: QUERY -POSTHOOK: query: -- The right table is a sub-query and the left table is not. 
--- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from tbl1 a join (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 @@ -3373,9 +3285,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -3385,9 +3295,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -3530,9 +3438,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 56 -PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -3545,9 +3451,7 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. 
--- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( diff --git a/ql/src/test/results/clientpositive/llap/bucket2.q.out b/ql/src/test/results/clientpositive/llap/bucket2.q.out index 244f247..edd6735 100644 --- a/ql/src/test/results/clientpositive/llap/bucket2.q.out +++ b/ql/src/test/results/clientpositive/llap/bucket2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket2_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket2_1 diff --git a/ql/src/test/results/clientpositive/llap/bucket3.q.out b/ql/src/test/results/clientpositive/llap/bucket3.q.out index dcf731d..b4fd08d 100644 --- a/ql/src/test/results/clientpositive/llap/bucket3.q.out +++ b/ql/src/test/results/clientpositive/llap/bucket3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket3_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by 
(ds string) CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket3_1 diff --git a/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out b/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out index 7725869..2c3cf2f 100644 --- a/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out +++ b/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out @@ -168,12 +168,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@clustergroupby@ds=101 POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: clustergroupby PARTITION(ds=101).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: --normal-- -explain +PREHOOK: query: explain select key, count(1) from clustergroupby where ds='101' group by key order by key limit 10 PREHOOK: type: QUERY -POSTHOOK: query: --normal-- -explain +POSTHOOK: query: explain select key, count(1) from clustergroupby where ds='101' group by key order by key limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -272,12 +270,10 @@ POSTHOOK: Input: default@clustergroupby@ds=101 111 1 113 2 114 1 -PREHOOK: query: --function-- -explain +PREHOOK: query: explain select length(key), count(1) from clustergroupby where ds='101' group by length(key) limit 10 PREHOOK: type: QUERY -POSTHOOK: query: --function-- -explain +POSTHOOK: query: explain select length(key), count(1) from clustergroupby where ds='101' group by length(key) limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -436,12 +432,10 @@ POSTHOOK: Input: default@clustergroupby@ds=101 1 10 2 74 3 416 -PREHOOK: query: --constant-- -explain +PREHOOK: query: explain select key, count(1) from clustergroupby where ds='101' group by key,3 order by key,3 limit 10 PREHOOK: type: QUERY -POSTHOOK: query: --constant-- -explain +POSTHOOK: query: explain select key, count(1) 
from clustergroupby where ds='101' group by key,3 order by key,3 limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -540,12 +534,10 @@ POSTHOOK: Input: default@clustergroupby@ds=101 111 1 113 2 114 1 -PREHOOK: query: --subquery-- -explain +PREHOOK: query: explain select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key order by key limit 10 PREHOOK: type: QUERY -POSTHOOK: query: --subquery-- -explain +POSTHOOK: query: explain select key, count(1) from (select value as key, key as value from clustergroupby where ds='101')subq group by key order by key limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1093,13 +1085,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- number of buckets cannot be changed, so drop the table -drop table clustergroupby +PREHOOK: query: drop table clustergroupby PREHOOK: type: DROPTABLE PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby -POSTHOOK: query: -- number of buckets cannot be changed, so drop the table -drop table clustergroupby +POSTHOOK: query: drop table clustergroupby POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby @@ -1111,13 +1101,11 @@ POSTHOOK: query: create table clustergroupby(key string, value string) partition POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@clustergroupby -PREHOOK: query: --sort columns-- -alter table clustergroupby clustered by (value) sorted by (key, value) into 1 buckets +PREHOOK: query: alter table clustergroupby clustered by (value) sorted by (key, value) into 1 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby -POSTHOOK: query: --sort columns-- -alter table clustergroupby clustered by (value) sorted by (key, value) into 1 buckets +POSTHOOK: query: alter table clustergroupby clustered by (value) sorted by (key, value) into 
1 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby @@ -1403,13 +1391,11 @@ POSTHOOK: Input: default@clustergroupby@ds=102 111 1 113 2 114 1 -PREHOOK: query: -- number of buckets cannot be changed, so drop the table -drop table clustergroupby +PREHOOK: query: drop table clustergroupby PREHOOK: type: DROPTABLE PREHOOK: Input: default@clustergroupby PREHOOK: Output: default@clustergroupby -POSTHOOK: query: -- number of buckets cannot be changed, so drop the table -drop table clustergroupby +POSTHOOK: query: drop table clustergroupby POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@clustergroupby POSTHOOK: Output: default@clustergroupby diff --git a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out index 20702f9..2c79bda 100644 --- a/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/llap/bucket_map_join_tez1.q.out @@ -627,16 +627,12 @@ POSTHOOK: Input: default@tab_part POSTHOOK: Input: default@tab_part@ds=2008-04-08 #### A masked pattern was here #### 1166 -PREHOOK: query: -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table. --- In this case the sub-query is chosen as the big table. -explain +PREHOOK: query: explain select a.k1, a.v1, b.value from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a join tab b on a.k1 = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table. --- In this case the sub-query is chosen as the big table. 
-explain +POSTHOOK: query: explain select a.k1, a.v1, b.value from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a join tab b on a.k1 = b.key @@ -1028,13 +1024,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- multi-way join -explain +PREHOOK: query: explain select a.key, a.value, b.value from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- multi-way join -explain +POSTHOOK: query: explain select a.key, a.value, b.value from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key POSTHOOK: type: QUERY @@ -1245,14 +1239,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- in this case sub-query is the small table -explain +PREHOOK: query: explain select a.key, a.value, b.value from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a join tab_part b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- in this case sub-query is the small table -explain +POSTHOOK: query: explain select a.key, a.value, b.value from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a join tab_part b on a.key = b.key @@ -1453,13 +1445,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- join on non-bucketed column results in broadcast join. -explain +PREHOOK: query: explain select a.key, a.value, b.value from tab a join tab_part b on a.value = b.value PREHOOK: type: QUERY -POSTHOOK: query: -- join on non-bucketed column results in broadcast join. 
-explain +POSTHOOK: query: explain select a.key, a.value, b.value from tab a join tab_part b on a.value = b.value POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out b/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out index 163e819..e6cfb40 100644 --- a/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketizedhiveinputformat.q.out @@ -66,46 +66,38 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- 2 split by max.split.size -SELECT COUNT(1) FROM T2 +PREHOOK: query: SELECT COUNT(1) FROM T2 PREHOOK: type: QUERY PREHOOK: Input: default@t2 #### A masked pattern was here #### -POSTHOOK: query: -- 2 split by max.split.size -SELECT COUNT(1) FROM T2 +POSTHOOK: query: SELECT COUNT(1) FROM T2 POSTHOOK: type: QUERY POSTHOOK: Input: default@t2 #### A masked pattern was here #### 5000000 -PREHOOK: query: -- 1 split for two file -SELECT COUNT(1) FROM T3 +PREHOOK: query: SELECT COUNT(1) FROM T3 PREHOOK: type: QUERY PREHOOK: Input: default@t3 #### A masked pattern was here #### -POSTHOOK: query: -- 1 split for two file -SELECT COUNT(1) FROM T3 +POSTHOOK: query: SELECT COUNT(1) FROM T3 POSTHOOK: type: QUERY POSTHOOK: Input: default@t3 #### A masked pattern was here #### 1000 -PREHOOK: query: -- 1 split -SELECT COUNT(1) FROM T2 +PREHOOK: query: SELECT COUNT(1) FROM T2 PREHOOK: type: QUERY PREHOOK: Input: default@t2 #### A masked pattern was here #### -POSTHOOK: query: -- 1 split -SELECT COUNT(1) FROM T2 +POSTHOOK: query: SELECT COUNT(1) FROM T2 POSTHOOK: type: QUERY POSTHOOK: Input: default@t2 #### A masked pattern was here #### 5000000 -PREHOOK: query: -- 2 split for two file -SELECT COUNT(1) FROM T3 +PREHOOK: query: SELECT COUNT(1) FROM T3 PREHOOK: type: QUERY PREHOOK: Input: default@t3 #### A masked 
pattern was here #### -POSTHOOK: query: -- 2 split for two file -SELECT COUNT(1) FROM T3 +POSTHOOK: query: SELECT COUNT(1) FROM T3 POSTHOOK: type: QUERY POSTHOOK: Input: default@t3 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out index e9bb701..96c0322 100644 --- a/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin1.q.out @@ -22,14 +22,12 @@ POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) p POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- empty partitions (HIVE-3205) -explain extended +PREHOOK: query: explain extended select /*+mapjoin(b)*/ a.key, a.value, b.value from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b on a.key=b.key where b.ds="2008-04-08" PREHOOK: type: QUERY -POSTHOOK: query: -- empty partitions (HIVE-3205) -explain extended +POSTHOOK: query: explain extended select /*+mapjoin(b)*/ a.key, a.value, b.value from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b on a.key=b.key where b.ds="2008-04-08" diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out index 84de3e3..191e7e3 100644 --- a/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin2.q.out @@ -804,13 +804,11 @@ POSTHOOK: Input: default@bucketmapjoin_hash_result_1 POSTHOOK: Input: default@bucketmapjoin_hash_result_2 #### A masked pattern was here #### 0 0 0 -PREHOOK: query: -- HIVE-3210 -load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 
partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: -- HIVE-3210 -load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@srcbucket_mapjoin_part_2 diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin6.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin6.q.out index 198404b..c9b344e 100644 --- a/ql/src/test/results/clientpositive/llap/bucketmapjoin6.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin6.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets +PREHOOK: query: create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets +POSTHOOK: query: create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp1 diff --git a/ql/src/test/results/clientpositive/llap/bucketmapjoin7.q.out b/ql/src/test/results/clientpositive/llap/bucketmapjoin7.q.out index b515af6..4183511 100644 --- a/ql/src/test/results/clientpositive/llap/bucketmapjoin7.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketmapjoin7.q.out @@ -52,17 +52,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: 
default@srcbucket_mapjoin_part_2@ds=2008-04-08/hr=0 -PREHOOK: query: -- Tests that bucket map join works with a table with more than one level of partitioning - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08' ORDER BY a.key, b.value LIMIT 1 PREHOOK: type: QUERY -POSTHOOK: query: -- Tests that bucket map join works with a table with more than one level of partitioning - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08' diff --git a/ql/src/test/results/clientpositive/llap/bucketpruning1.q.out b/ql/src/test/results/clientpositive/llap/bucketpruning1.q.out index f6ddfe9..922f23a 100644 --- a/ql/src/test/results/clientpositive/llap/bucketpruning1.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketpruning1.q.out @@ -6,24 +6,18 @@ POSTHOOK: query: CREATE TABLE srcbucket_pruned(key int, value string) partitione POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcbucket_pruned -PREHOOK: query: -- cannot prune 2-key scenarios without a smarter optimizer -CREATE TABLE srcbucket_unpruned(key int, value string) partitioned by (ds string) CLUSTERED BY (key,value) INTO 16 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE srcbucket_unpruned(key int, value string) partitioned by (ds string) CLUSTERED BY (key,value) INTO 16 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcbucket_unpruned -POSTHOOK: query: -- cannot prune 2-key scenarios without a smarter optimizer -CREATE TABLE srcbucket_unpruned(key int, value string) partitioned by (ds string) CLUSTERED BY (key,value) INTO 16 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: 
CREATE TABLE srcbucket_unpruned(key int, value string) partitioned by (ds string) CLUSTERED BY (key,value) INTO 16 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcbucket_unpruned -PREHOOK: query: -- good cases - -explain extended +PREHOOK: query: explain extended select * from srcbucket_pruned where key = 1 PREHOOK: type: QUERY -POSTHOOK: query: -- good cases - -explain extended +POSTHOOK: query: explain extended select * from srcbucket_pruned where key = 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -886,14 +880,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- compat case (-15 = 1 & 15) - -explain extended +PREHOOK: query: explain extended select * from srcbucket_pruned where key = -15 PREHOOK: type: QUERY -POSTHOOK: query: -- compat case (-15 = 1 & 15) - -explain extended +POSTHOOK: query: explain extended select * from srcbucket_pruned where key = -15 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -952,14 +942,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- valid but irrelevant case (all buckets selected) - -explain extended +PREHOOK: query: explain extended select * from srcbucket_pruned where key in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17) PREHOOK: type: QUERY -POSTHOOK: query: -- valid but irrelevant case (all buckets selected) - -explain extended +POSTHOOK: query: explain extended select * from srcbucket_pruned where key in (1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1200,14 +1186,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- valid, but unimplemented cases - -explain extended +PREHOOK: query: explain extended select * from srcbucket_pruned where key = 1 and ds='2008-04-08' or key = 2 PREHOOK: type: QUERY -POSTHOOK: query: -- valid, but unimplemented cases - -explain extended +POSTHOOK: query: explain extended select * from srcbucket_pruned where key = 1 and ds='2008-04-08' or key = 2 POSTHOOK: 
type: QUERY STAGE DEPENDENCIES: @@ -1387,14 +1369,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Invalid cases - -explain extended +PREHOOK: query: explain extended select * from srcbucket_pruned where key = 'x11' PREHOOK: type: QUERY -POSTHOOK: query: -- Invalid cases - -explain extended +POSTHOOK: query: explain extended select * from srcbucket_pruned where key = 'x11' POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out index 8519ff3..e4fed11 100644 --- a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_2.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -78,17 +76,13 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=2 POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only 
operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -261,17 +255,13 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 9 val_9val_9 1 -PREHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected, --- it should be a map-reduce job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds is not null and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected, --- it should be a map-reduce job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -468,17 +458,13 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 9 val_9val_9 1 9 val_9val_9 1 -PREHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only --- job even though multiple partitions of 'b' are being selected -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds is not null PREHOOK: type: QUERY -POSTHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only --- job even though multiple 
partitions of 'b' are being selected -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -675,8 +661,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 9 val_9val_9 1 9 val_9val_9 1 -PREHOOK: query: -- This should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -685,8 +670,7 @@ JOIN (select key, value from test_table2 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -868,8 +852,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 9 val_9val_9 1 -PREHOOK: query: -- This should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.v1, b.v2) FROM @@ -878,8 +861,7 @@ JOIN (select key, concat(value, value) as v2 from test_table2 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.v1, b.v2) FROM @@ -1061,8 +1043,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5val_5val_5 1 5 val_5val_5val_5val_5 1 9 val_9val_9val_9val_9 1 -PREHOOK: query: -- This should be a map-reduce job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key+a.key, concat(a.value, b.value) FROM @@ -1071,8 +1052,7 @@ JOIN (select key, value from test_table2 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-reduce job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') 
SELECT a.key+a.key, concat(a.value, b.value) FROM diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out index 4b4a95e..67e925a 100644 --- a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_6.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -56,17 +54,13 @@ POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the sort-order matches -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.key2, concat(a.value, b.value) FROM test_table1 a JOIN 
test_table2 b ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the sort-order matches -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.key2, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -240,9 +234,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 6 val_5val_5 1 8 9 val_8val_8 1 9 10 val_9val_9 1 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the sort-order matches -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq1.key, subq1.key2, subq1.value from ( @@ -251,9 +243,7 @@ FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the sort-order matches -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq1.key, subq1.key2, subq1.value from ( @@ -436,17 +426,13 @@ POSTHOOK: Input: default@test_table3@ds=1 5 6 val_5val_5 1 8 9 val_8val_8 1 9 10 val_9val_9 1 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key2, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN 
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key2, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -555,9 +541,7 @@ STAGE PLANS: Stage: Stage-3 Stats-Aggr Operator -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq1.key2, subq1.key, subq1.value from ( @@ -566,9 +550,7 @@ FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq1.key2, subq1.key, subq1.value from ( @@ -680,9 +662,7 @@ STAGE PLANS: Stage: Stage-3 Stats-Aggr Operator -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq2.key, subq2.key2, subq2.value from ( @@ -694,9 +674,7 @@ ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 )subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq2.key, subq2.key2, subq2.value from ( @@ -888,9 +866,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 6 val_5val_5 1 8 9 val_8val_8 1 9 10 val_9val_9 1 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE 
TABLE test_table3 PARTITION (ds = '1') SELECT subq2.k2, subq2.k1, subq2.value from ( @@ -902,9 +878,7 @@ ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 )subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq2.k2, subq2.k1, subq2.value from ( @@ -1106,9 +1080,7 @@ CLUSTERED BY (key, key2) SORTED BY (key DESC, key2 DESC) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table4 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table4 PARTITION (ds = '1') SELECT subq2.k2, subq2.k1, subq2.value from ( @@ -1120,9 +1092,7 @@ ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 )subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table4 PARTITION (ds = '1') SELECT subq2.k2, subq2.k1, subq2.value from ( diff --git a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out index 11de932..8691d0d 100644 --- a/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out +++ b/ql/src/test/results/clientpositive/llap/bucketsortoptimize_insert_7.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 
BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -54,18 +52,14 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' and (a.key = 0 or a.key = 5) PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -237,8 +231,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 5 val_5val_5 1 -PREHOOK: query: -- This should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -247,8 +240,7 @@ JOIN (select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-only job -EXPLAIN 
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -426,8 +418,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 5 val_5val_5 1 -PREHOOK: query: -- This should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -437,8 +428,7 @@ JOIN ON a.key = b.key WHERE a.key = 0 or a.key = 5 PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM diff --git a/ql/src/test/results/clientpositive/llap/cbo_gby.q.out b/ql/src/test/results/clientpositive/llap/cbo_gby.q.out index 04597a7..d1fe9e9 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_gby.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_gby.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 6. Test Select + TS + Join + Fil + GB + GB Having -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 6. 
Test Select + TS + Join + Fil + GB + GB Having -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_gby_empty.q.out b/ql/src/test/results/clientpositive/llap/cbo_gby_empty.q.out index 68f0255..6970fd2 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_gby_empty.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_gby_empty.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +PREHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +POSTHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/cbo_join.q.out b/ql/src/test/results/clientpositive/llap/cbo_join.q.out index c5e9858..4287788 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_join.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_join.q.out @@ -1,15 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- 4. 
Test Select + Join + TS -select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS --- 4. Test Select + Join + TS -select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -6968,16 +6964,14 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- 5. Test Select + Join + FIL + TS -select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0) +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0) PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 5. 
Test Select + Join + FIL + TS -select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0) +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0) POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_limit.q.out b/ql/src/test/results/clientpositive/llap/cbo_limit.q.out index 13df214..c582578 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_limit.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_limit.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_gby.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_gby.q.out index 04597a7..d1fe9e9 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_gby.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_gby.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 6. 
Test Select + TS + Join + Fil + GB + GB Having -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 6. Test Select + TS + Join + Fil + GB + GB Having -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_join.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_join.q.out index c5e9858..4287788 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_join.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_join.q.out @@ -1,15 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- 4. Test Select + Join + TS -select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS --- 4. Test Select + Join + TS -select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -6968,16 +6964,14 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- 5. 
Test Select + Join + FIL + TS -select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0) +PREHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0) PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 5. Test Select + Join + FIL + TS -select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0) +POSTHOOK: query: select cbo_t1.c_int, cbo_t2.c_int from cbo_t1 join cbo_t2 on cbo_t1.key=cbo_t2.key where (cbo_t1.c_int + cbo_t2.c_int == 2) and (cbo_t1.c_int > 0 or cbo_t2.c_float >= 0) POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out index 13df214..c582578 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_limit.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 7. 
Test Select + TS + Join + Fil + GB + GB Having + Limit -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_semijoin.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_semijoin.q.out index bdd8125..6f79549 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_semijoin.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_semijoin.q.out @@ -1,13 +1,11 @@ -PREHOOK: query: -- 12. SemiJoin -select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 12. SemiJoin -select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_subq_in.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_subq_in.q.out index f6bfad2..39a4f8a 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_subq_in.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_subq_in.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- 17. 
SubQueries In --- non agg, non corr -select * +PREHOOK: query: select * from src_cbo where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 17. SubQueries In --- non agg, non corr -select * +POSTHOOK: query: select * from src_cbo where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key POSTHOOK: type: QUERY @@ -25,11 +21,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * +PREHOOK: query: select * from src_cbo b where b.key in (select distinct a.key @@ -39,11 +31,7 @@ where b.key in PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * +POSTHOOK: query: select * from src_cbo b where b.key in (select distinct a.key @@ -64,8 +52,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey +PREHOOK: query: select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) @@ -73,8 +60,7 @@ where li.l_linenumber = 1 and PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey +POSTHOOK: query: select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where 
l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) @@ -84,13 +70,7 @@ POSTHOOK: Input: default@lineitem #### A masked pattern was here #### 4297 1798 108570 8571 -PREHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) +PREHOOK: query: select key, value, count(*) from src_cbo b where b.key in (select key from src_cbo where src_cbo.key > '8') group by key, value @@ -98,13 +78,7 @@ having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) +POSTHOOK: query: select key, value, count(*) from src_cbo b where b.key in (select key from src_cbo where src_cbo.key > '8') group by key, value @@ -126,8 +100,7 @@ POSTHOOK: Input: default@src_cbo 96 val_96 1 97 val_97 2 98 val_98 2 -PREHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) +PREHOOK: query: select p_mfgr, p_name, avg(p_size) from part group by p_mfgr, p_name having p_name in @@ -135,8 +108,7 @@ having p_name in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) +POSTHOOK: query: select p_mfgr, p_name, avg(p_size) from part group by p_mfgr, p_name having p_name in diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_subq_not_in.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_subq_not_in.q.out index c7274f7..c006d11 100644 --- 
a/ql/src/test/results/clientpositive/llap/cbo_rp_subq_not_in.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_subq_not_in.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 16. SubQueries Not In --- non agg, non corr -select * +PREHOOK: query: select * from src_cbo where src_cbo.key not in ( select key from src_cbo s1 @@ -9,9 +7,7 @@ where src_cbo.key not in PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 16. SubQueries Not In --- non agg, non corr -select * +POSTHOOK: query: select * from src_cbo where src_cbo.key not in ( select key from src_cbo s1 @@ -139,8 +135,7 @@ POSTHOOK: Input: default@src_cbo 199 val_199 199 val_199 2 val_2 -PREHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size +PREHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name not in (select p_name @@ -150,8 +145,7 @@ where b.p_name not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size +POSTHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name not in (select p_name @@ -179,8 +173,7 @@ Manufacturer#4 almond antique violet mint lemon 39 Manufacturer#5 almond azure blanched chiffon midnight 23 Manufacturer#5 almond antique blue firebrick mint 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 -PREHOOK: query: -- agg, non corr -select p_name, p_size +PREHOOK: query: select p_name, p_size from part where part.p_size not in (select avg(p_size) @@ -190,8 +183,7 @@ part where part.p_size not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, non corr -select p_name, p_size +POSTHOOK: query: select p_name, p_size from part where part.p_size not in (select avg(p_size) @@ -227,8 +219,7 @@ almond aquamarine sandy cyan gainsboro 18 almond aquamarine yellow dodger mint 7 almond azure aquamarine papaya violet 12 almond azure 
blanched chiffon midnight 23 -PREHOOK: query: -- agg, corr -select p_mfgr, p_name, p_size +PREHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) from (select p_mfgr, p_size from part) a @@ -237,8 +228,7 @@ from part b where b.p_size not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, corr -select p_mfgr, p_name, p_size +POSTHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) from (select p_mfgr, p_size from part) a @@ -267,8 +257,7 @@ Manufacturer#2 almond aquamarine rose maroon antique 25 Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 Manufacturer#4 almond azure aquamarine papaya violet 12 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) +PREHOOK: query: select li.l_partkey, count(*) from lineitem li where li.l_linenumber = 1 and li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') @@ -276,8 +265,7 @@ group by li.l_partkey order by li.l_partkey PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) +POSTHOOK: query: select li.l_partkey, count(*) from lineitem li where li.l_linenumber = 1 and li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') @@ -301,10 +289,7 @@ POSTHOOK: Input: default@lineitem 139636 1 175839 1 182052 1 -PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. 
- --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) +PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -316,10 +301,7 @@ having b.p_mfgr not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. - --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) +POSTHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -333,8 +315,7 @@ POSTHOOK: Input: default@part #### A masked pattern was here #### Manufacturer#1 1173.15 Manufacturer#2 1690.68 -PREHOOK: query: -- agg, non corr, having -select b.p_mfgr, min(p_retailprice) +PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -347,8 +328,7 @@ having b.p_mfgr not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, non corr, having -select b.p_mfgr, min(p_retailprice) +POSTHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf.q.out index b30d9da..c5270f7 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 8. Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 8. 
Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf_stats_opt.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf_stats_opt.q.out index 3a589b4..4cebe36 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf_stats_opt.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_udf_udaf_stats_opt.q.out @@ -1,16 +1,10 @@ Warning: Value had a \n character in it. -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 8. Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 8. 
Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_unionDistinct_2.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_unionDistinct_2.q.out index 10609d9..7cc6324 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_unionDistinct_2.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_unionDistinct_2.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE u1 as select key, value from src order by key limit 5 +PREHOOK: query: CREATE TABLE u1 as select key, value from src order by key limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@u1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE u1 as select key, value from src order by key limit 5 +POSTHOOK: query: CREATE TABLE u1 as select key, value from src order by key limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out index 4a7b935..e412ee1 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_views.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 10. Test views -create view v1 as select c_int, value, c_boolean, dt from cbo_t1 +PREHOOK: query: create view v1 as select c_int, value, c_boolean, dt from cbo_t1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@cbo_t1 PREHOOK: Output: database:default PREHOOK: Output: default@v1 -POSTHOOK: query: -- 10. 
Test views -create view v1 as select c_int, value, c_boolean, dt from cbo_t1 +POSTHOOK: query: create view v1 as select c_int, value, c_boolean, dt from cbo_t1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@cbo_t1 POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/llap/cbo_rp_windowing_2.q.out b/ql/src/test/results/clientpositive/llap/cbo_rp_windowing_2.q.out index 3434336..887213e 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_rp_windowing_2.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_rp_windowing_2.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. testWindowing -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -9,10 +6,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. testWindowing -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -46,8 +40,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 2. 
testGroupByWithPartitioning -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name)as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -57,8 +50,7 @@ group by p_mfgr, p_name, p_size PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 2. testGroupByWithPartitioning -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name)as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -93,8 +85,7 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 1788.73 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 5 5 23 -23 -PREHOOK: query: -- 3. testGroupByHavingWithSWQ -select p_mfgr, p_name, p_size, min(p_retailprice), +PREHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -104,8 +95,7 @@ having p_size > 0 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 3. 
testGroupByHavingWithSWQ -select p_mfgr, p_name, p_size, min(p_retailprice), +POSTHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -140,15 +130,13 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 1788.73 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 5 5 23 -23 -PREHOOK: query: -- 4. testCount -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, count(p_size) over(distribute by p_mfgr sort by p_name) as cd from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 4. testCount -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, count(p_size) over(distribute by p_mfgr sort by p_name) as cd from part POSTHOOK: type: QUERY @@ -180,8 +168,7 @@ Manufacturer#5 almond antique medium spring khaki 2 Manufacturer#5 almond antique sky peru orange 3 Manufacturer#5 almond aquamarine dodger light gainsboro 4 Manufacturer#5 almond azure blanched chiffon midnight 5 -PREHOOK: query: -- 5. testCountWithWindowingUDAF -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, count(p_size) over(distribute by p_mfgr sort by p_name) as cd, @@ -191,8 +178,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 5. 
testCountWithWindowingUDAF -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, count(p_size) over(distribute by p_mfgr sort by p_name) as cd, @@ -228,8 +214,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 2 1611.66 3401.35000000000 Manufacturer#5 almond antique sky peru orange 3 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 6. testCountInSubQ -select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz +PREHOOK: query: select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz from (select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -241,8 +226,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 6. testCountInSubQ -select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz +POSTHOOK: query: select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz from (select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -280,8 +264,7 @@ POSTHOOK: Input: default@part 5 5 5 7672.66 -23 5 5 5 8923.62 -7 6 5 6 8749.730000000001 14 -PREHOOK: query: -- 7. 
testJoinWithWindowingAndPTF -select abc.p_mfgr, abc.p_name, +PREHOOK: query: select abc.p_mfgr, abc.p_name, rank() over(distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over(distribute by abc.p_mfgr sort by abc.p_name) as dr, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, @@ -293,8 +276,7 @@ order by p_name PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 7. testJoinWithWindowingAndPTF -select abc.p_mfgr, abc.p_name, +POSTHOOK: query: select abc.p_mfgr, abc.p_name, rank() over(distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over(distribute by abc.p_mfgr sort by abc.p_name) as dr, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, @@ -334,15 +316,13 @@ Manufacturer#5 almond antique medium spring khaki 2 2 1611.66 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 8. testMixedCaseAlias -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name, p_size desc) as R from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 8. 
testMixedCaseAlias -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name, p_size desc) as R from part POSTHOOK: type: QUERY @@ -374,8 +354,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 Manufacturer#5 almond antique sky peru orange 2 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 -PREHOOK: query: -- 9. testHavingWithWindowingNoGBY -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -383,8 +362,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 9. testHavingWithWindowingNoGBY -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -418,8 +396,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 10. 
testHavingWithWindowingCondRankNoGBY -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -427,8 +404,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 10. testHavingWithWindowingCondRankNoGBY -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -462,8 +438,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 11. testFirstLast -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2, first_value(p_size) over w1 as f, last_value(p_size, false) over w1 as l @@ -472,8 +447,7 @@ window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 11. 
testFirstLast -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2, first_value(p_size) over w1 as f, last_value(p_size, false) over w1 as l @@ -508,8 +482,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6 31 46 Manufacturer#5 almond antique sky peru orange 2 2 31 23 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 6 23 Manufacturer#5 almond azure blanched chiffon midnight 23 23 2 23 -PREHOOK: query: -- 12. testFirstLastWithWhere -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2, first_value(p_size) over w1 as f, @@ -520,8 +493,7 @@ window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 12. testFirstLastWithWhere -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2, first_value(p_size) over w1 as f, @@ -537,8 +509,7 @@ Manufacturer#3 almond antique forest lavender goldenrod 14 2 14 17 1 Manufacturer#3 almond antique metallic orange dim 19 3 19 17 45 Manufacturer#3 almond antique misty red olive 1 4 1 14 45 Manufacturer#3 almond antique olive coral navajo 45 5 45 19 45 -PREHOOK: query: -- 13. 
testSumWindow -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2 from part @@ -546,8 +517,7 @@ window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 13. testSumWindow -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2 from part @@ -581,16 +551,14 @@ Manufacturer#5 almond antique medium spring khaki 6 85 6 Manufacturer#5 almond antique sky peru orange 2 108 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 46 Manufacturer#5 almond azure blanched chiffon midnight 23 71 23 -PREHOOK: query: -- 14. testNoSortClause -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 14. testNoSortClause -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) @@ -623,8 +591,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 Manufacturer#5 almond antique sky peru orange 2 3 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 -PREHOOK: query: -- 15. 
testExpressions -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, cume_dist() over(distribute by p_mfgr sort by p_name) as cud, @@ -641,8 +608,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 15. testExpressions -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, cume_dist() over(distribute by p_mfgr sort by p_name) as cud, @@ -685,8 +651,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 0 0.25 1 2 18.5 12.5 1 6 Manufacturer#5 almond antique sky peru orange 2 3 3 0 0.5 2 3 13.0 12.832251036613439 1 2 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 0 0.75 2 4 21.25 18.102140757380052 1 46 6 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 1 1.0 3 5 21.6 16.206171663906314 1 23 2 -PREHOOK: query: -- 16. testMultipleWindows -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, cume_dist() over(distribute by p_mfgr sort by p_name) as cud, @@ -698,8 +663,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 16. 
testMultipleWindows -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, cume_dist() over(distribute by p_mfgr sort by p_name) as cud, @@ -737,8 +701,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 0 37 8 31 Manufacturer#5 almond antique sky peru orange 2 3 3 0 39 2 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 0 85 46 6 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 1 108 23 2 -PREHOOK: query: -- 17. testCountStar -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, count(*) over(distribute by p_mfgr sort by p_name ) as c, count(p_size) over(distribute by p_mfgr sort by p_name) as ca, first_value(p_size) over w1 as fvW1 @@ -747,8 +710,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 17. testCountStar -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, count(*) over(distribute by p_mfgr sort by p_name ) as c, count(p_size) over(distribute by p_mfgr sort by p_name) as ca, first_value(p_size) over w1 as fvW1 @@ -783,8 +745,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 31 Manufacturer#5 almond antique sky peru orange 2 3 3 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 2 -PREHOOK: query: -- 18. testUDAFs -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_retailprice) over w1 as s, min(p_retailprice) over w1 as mi, max(p_retailprice) over w1 as ma, @@ -794,8 +755,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 18. 
testUDAFs -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_retailprice) over w1 as s, min(p_retailprice) over w1 as mi, max(p_retailprice) over w1 as ma, @@ -831,8 +791,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6208.18 1018.1 1789.69 1552. Manufacturer#5 almond antique sky peru orange 2 7672.66 1018.1 1789.69 1534.532 Manufacturer#5 almond aquamarine dodger light gainsboro 46 5882.969999999999 1018.1 1788.73 1470.7424999999998 Manufacturer#5 almond azure blanched chiffon midnight 23 4271.3099999999995 1018.1 1788.73 1423.7699999999998 -PREHOOK: query: -- 19. testUDAFsWithGBY -select p_mfgr,p_name, p_size, p_retailprice, +PREHOOK: query: select p_mfgr,p_name, p_size, p_retailprice, sum(p_retailprice) over w1 as s, min(p_retailprice) as mi , max(p_retailprice) as ma , @@ -843,8 +802,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 19. testUDAFsWithGBY -select p_mfgr,p_name, p_size, p_retailprice, +POSTHOOK: query: select p_mfgr,p_name, p_size, p_retailprice, sum(p_retailprice) over w1 as s, min(p_retailprice) as mi , max(p_retailprice) as ma , @@ -880,8 +838,7 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 6208.18 1611.66 1611 Manufacturer#5 almond antique sky peru orange 2 1788.73 7672.66 1788.73 1788.73 1534.532 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 5882.969999999999 1018.1 1018.1 1470.7424999999998 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 4271.3099999999995 1464.48 1464.48 1423.7699999999998 -PREHOOK: query: -- 20. 
testSTATs -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, stddev(p_retailprice) over w1 as sdev, stddev_pop(p_retailprice) over w1 as sdev_pop, collect_set(p_size) over w1 as uniq_size, @@ -893,8 +850,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 20. testSTATs -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, stddev(p_retailprice) over w1 as sdev, stddev_pop(p_retailprice) over w1 as sdev_pop, collect_set(p_size) over w1 as uniq_size, @@ -932,8 +888,7 @@ Manufacturer#5 almond antique medium spring khaki 6 316.68049612345885 316.68049 Manufacturer#5 almond antique sky peru orange 2 285.40506298242155 285.40506298242155 [31,6,2,46,23] 81456.04997600002 -0.712858514567818 -3297.2011999999986 Manufacturer#5 almond aquamarine dodger light gainsboro 46 285.43749038756283 285.43749038756283 [6,2,46,23] 81474.56091875004 -0.984128787153391 -4871.028125000002 Manufacturer#5 almond azure blanched chiffon midnight 23 315.9225931564038 315.9225931564038 [2,46,23] 99807.08486666664 -0.9978877469246936 -5664.856666666666 -PREHOOK: query: -- 21. testDISTs -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, histogram_numeric(p_retailprice, 5) over w1 as hist, percentile(p_partkey, 0.5) over w1 as per, row_number() over(distribute by p_mfgr sort by p_mfgr, p_name) as rn @@ -942,8 +897,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 21. 
testDISTs -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, histogram_numeric(p_retailprice, 5) over w1 as hist, percentile(p_partkey, 0.5) over w1 as per, row_number() over(distribute by p_mfgr sort by p_mfgr, p_name) as rn @@ -978,8 +932,7 @@ Manufacturer#5 almond antique medium spring khaki 6 [{"x":1018.1,"y":1.0},{"x":1 Manufacturer#5 almond antique sky peru orange 2 [{"x":1018.1,"y":1.0},{"x":1464.48,"y":1.0},{"x":1611.66,"y":1.0},{"x":1788.73,"y":1.0},{"x":1789.69,"y":1.0}] 78486.0 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 [{"x":1018.1,"y":1.0},{"x":1464.48,"y":1.0},{"x":1611.66,"y":1.0},{"x":1788.73,"y":1.0}] 60577.5 4 Manufacturer#5 almond azure blanched chiffon midnight 23 [{"x":1018.1,"y":1.0},{"x":1464.48,"y":1.0},{"x":1788.73,"y":1.0}] 78486.0 5 -PREHOOK: query: -- 22. testViewAsTableInputWithWindowing -create view IF NOT EXISTS mfgr_price_view as +PREHOOK: query: create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part @@ -988,8 +941,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part PREHOOK: Output: database:default PREHOOK: Output: default@mfgr_price_view -POSTHOOK: query: -- 22. testViewAsTableInputWithWindowing -create view IF NOT EXISTS mfgr_price_view as +POSTHOOK: query: create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part @@ -1070,8 +1022,7 @@ Manufacturer#4 Brand#42 2581.68 7337.62 Manufacturer#5 Brand#51 1611.66 1611.66 Manufacturer#5 Brand#52 3254.17 4865.83 Manufacturer#5 Brand#53 2806.83 7672.66 -PREHOOK: query: -- 23. 
testCreateViewWithWindowingQuery -create view IF NOT EXISTS mfgr_brand_price_view as +PREHOOK: query: create view IF NOT EXISTS mfgr_brand_price_view as select p_mfgr, p_brand, sum(p_retailprice) over w1 as s from part @@ -1080,8 +1031,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part PREHOOK: Output: database:default PREHOOK: Output: default@mfgr_brand_price_view -POSTHOOK: query: -- 23. testCreateViewWithWindowingQuery -create view IF NOT EXISTS mfgr_brand_price_view as +POSTHOOK: query: create view IF NOT EXISTS mfgr_brand_price_view as select p_mfgr, p_brand, sum(p_retailprice) over w1 as s from part @@ -1126,8 +1076,7 @@ Manufacturer#5 Brand#52 1789.69 Manufacturer#5 Brand#52 4271.3099999999995 Manufacturer#5 Brand#53 4418.49 Manufacturer#5 Brand#53 5190.08 -PREHOOK: query: -- 24. testLateralViews -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, lv_col, p_size, sum(p_size) over w1 as s from (select p_mfgr, p_name, p_size, array(1,2,3) arr from part) p lateral view explode(arr) part_lv as lv_col @@ -1135,8 +1084,7 @@ window w1 as (distribute by p_mfgr sort by p_size, lv_col rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 24. testLateralViews -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, lv_col, p_size, sum(p_size) over w1 as s from (select p_mfgr, p_name, p_size, array(1,2,3) arr from part) p lateral view explode(arr) part_lv as lv_col @@ -1222,8 +1170,7 @@ Manufacturer#5 almond aquamarine dodger light gainsboro 3 46 138 Manufacturer#5 almond azure blanched chiffon midnight 1 23 35 Manufacturer#5 almond azure blanched chiffon midnight 2 23 52 Manufacturer#5 almond azure blanched chiffon midnight 3 23 69 -PREHOOK: query: -- 25. 
testMultipleInserts3SWQs -CREATE TABLE part_1( +PREHOOK: query: CREATE TABLE part_1( p_mfgr STRING, p_name STRING, p_size INT, @@ -1233,8 +1180,7 @@ s DOUBLE) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_1 -POSTHOOK: query: -- 25. testMultipleInserts3SWQs -CREATE TABLE part_1( +POSTHOOK: query: CREATE TABLE part_1( p_mfgr STRING, p_name STRING, p_size INT, @@ -1460,8 +1406,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 31 Manufacturer#5 almond antique sky peru orange 2 3 3 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 2 -PREHOOK: query: -- 26. testGroupByHavingWithSWQAndAlias -select p_mfgr, p_name, p_size, min(p_retailprice) as mi, +PREHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice) as mi, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -1471,8 +1416,7 @@ having p_size > 0 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 26. testGroupByHavingWithSWQAndAlias -select p_mfgr, p_name, p_size, min(p_retailprice) as mi, +POSTHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice) as mi, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -1507,8 +1451,7 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 1788.73 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 5 5 23 -23 -PREHOOK: query: -- 27. 
testMultipleRangeWindows -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over (distribute by p_mfgr sort by p_size range between 10 preceding and current row) as s2, sum(p_size) over (distribute by p_mfgr sort by p_size range between current row and 10 following ) as s1 from part @@ -1516,8 +1459,7 @@ window w1 as (rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 27. testMultipleRangeWindows -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over (distribute by p_mfgr sort by p_size range between 10 preceding and current row) as s2, sum(p_size) over (distribute by p_mfgr sort by p_size range between current row and 10 following ) as s1 from part @@ -1551,15 +1493,13 @@ Manufacturer#5 almond antique medium spring khaki 6 8 6 Manufacturer#5 almond antique sky peru orange 2 2 8 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 46 Manufacturer#5 almond azure blanched chiffon midnight 23 23 54 -PREHOOK: query: -- 28. testPartOrderInUDAFInvoke -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) as s from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 28. testPartOrderInUDAFInvoke -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) as s from part POSTHOOK: type: QUERY @@ -1591,16 +1531,14 @@ Manufacturer#5 almond antique medium spring khaki 6 85 Manufacturer#5 almond antique sky peru orange 2 108 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 Manufacturer#5 almond azure blanched chiffon midnight 23 71 -PREHOOK: query: -- 29. 
testPartOrderInWdwDef -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s from part window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 29. testPartOrderInWdwDef -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s from part window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) @@ -1633,8 +1571,7 @@ Manufacturer#5 almond antique medium spring khaki 6 85 Manufacturer#5 almond antique sky peru orange 2 108 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 Manufacturer#5 almond azure blanched chiffon midnight 23 71 -PREHOOK: query: -- 30. testDefaultPartitioningSpecRules -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s, sum(p_size) over w2 as s2 from part @@ -1643,8 +1580,7 @@ window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 30. testDefaultPartitioningSpecRules -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s, sum(p_size) over w2 as s2 from part @@ -1679,8 +1615,7 @@ Manufacturer#5 almond antique medium spring khaki 6 85 37 Manufacturer#5 almond antique sky peru orange 2 108 39 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 85 Manufacturer#5 almond azure blanched chiffon midnight 23 71 108 -PREHOOK: query: -- 31. 
testWindowCrossReference -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2 from part @@ -1689,8 +1624,7 @@ window w1 as (partition by p_mfgr order by p_name range between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 31. testWindowCrossReference -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2 from part @@ -1725,8 +1659,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6 6 Manufacturer#5 almond antique sky peru orange 2 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 46 Manufacturer#5 almond azure blanched chiffon midnight 23 23 23 -PREHOOK: query: -- 32. testWindowInheritance -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2 from part @@ -1735,8 +1668,7 @@ window w1 as (partition by p_mfgr order by p_name range between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 32. testWindowInheritance -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2 from part @@ -1771,8 +1703,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6 37 Manufacturer#5 almond antique sky peru orange 2 2 39 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 85 Manufacturer#5 almond azure blanched chiffon midnight 23 23 108 -PREHOOK: query: -- 33. 
testWindowForwardReference -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2, sum(p_size) over w3 as s3 @@ -1783,8 +1714,7 @@ window w1 as (distribute by p_mfgr sort by p_name range between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 33. testWindowForwardReference -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2, sum(p_size) over w3 as s3 @@ -1821,8 +1751,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6 37 37 Manufacturer#5 almond antique sky peru orange 2 2 39 39 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 85 85 Manufacturer#5 almond azure blanched chiffon midnight 23 23 108 108 -PREHOOK: query: -- 34. testWindowDefinitionPropagation -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2, sum(p_size) over (w3 rows between 2 preceding and 2 following) as s3 @@ -1833,8 +1762,7 @@ window w1 as (distribute by p_mfgr sort by p_name range between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 34. testWindowDefinitionPropagation -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2, sum(p_size) over (w3 rows between 2 preceding and 2 following) as s3 @@ -1871,16 +1799,14 @@ Manufacturer#5 almond antique medium spring khaki 6 6 37 85 Manufacturer#5 almond antique sky peru orange 2 2 39 108 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 85 77 Manufacturer#5 almond azure blanched chiffon midnight 23 23 108 71 -PREHOOK: query: -- 35. 
testDistinctWithWindowing -select DISTINCT p_mfgr, p_name, p_size, +PREHOOK: query: select DISTINCT p_mfgr, p_name, p_size, sum(p_size) over w1 as s from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 35. testDistinctWithWindowing -select DISTINCT p_mfgr, p_name, p_size, +POSTHOOK: query: select DISTINCT p_mfgr, p_name, p_size, sum(p_size) over w1 as s from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) @@ -1913,15 +1839,13 @@ Manufacturer#5 almond antique medium spring khaki 6 85 Manufacturer#5 almond antique sky peru orange 2 108 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 Manufacturer#5 almond azure blanched chiffon midnight 23 71 -PREHOOK: query: -- 36. testRankWithPartitioning -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name ) as r from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 36. testRankWithPartitioning -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name ) as r from part POSTHOOK: type: QUERY @@ -1953,8 +1877,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 Manufacturer#5 almond antique sky peru orange 2 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 -PREHOOK: query: -- 37. 
testPartitioningVariousForms -select p_mfgr, +PREHOOK: query: select p_mfgr, round(sum(p_retailprice) over (partition by p_mfgr order by p_mfgr),2) as s1, min(p_retailprice) over (partition by p_mfgr) as s2, max(p_retailprice) over (distribute by p_mfgr sort by p_mfgr) as s3, @@ -1964,8 +1887,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 37. testPartitioningVariousForms -select p_mfgr, +POSTHOOK: query: select p_mfgr, round(sum(p_retailprice) over (partition by p_mfgr order by p_mfgr),2) as s1, min(p_retailprice) over (partition by p_mfgr) as s2, max(p_retailprice) over (distribute by p_mfgr sort by p_mfgr) as s3, @@ -2001,8 +1923,7 @@ Manufacturer#5 7672.66 1018.1 1789.69 1534.53 5 Manufacturer#5 7672.66 1018.1 1789.69 1534.53 5 Manufacturer#5 7672.66 1018.1 1789.69 1534.53 5 Manufacturer#5 7672.66 1018.1 1789.69 1534.53 5 -PREHOOK: query: -- 38. testPartitioningVariousForms2 -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (partition by p_mfgr, p_name order by p_mfgr, p_name rows between unbounded preceding and current row) as s1, min(p_retailprice) over (distribute by p_mfgr, p_name sort by p_mfgr, p_name rows between unbounded preceding and current row) as s2, max(p_retailprice) over (partition by p_mfgr, p_name order by p_name) as s3 @@ -2010,8 +1931,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 38. 
testPartitioningVariousForms2 -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (partition by p_mfgr, p_name order by p_mfgr, p_name rows between unbounded preceding and current row) as s1, min(p_retailprice) over (distribute by p_mfgr, p_name sort by p_mfgr, p_name rows between unbounded preceding and current row) as s2, max(p_retailprice) over (partition by p_mfgr, p_name order by p_name) as s3 @@ -2045,15 +1965,13 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 1611.66 1611.66 Manufacturer#5 almond antique sky peru orange 2 1788.73 1788.73 1788.73 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 1018.1 1018.1 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 1464.48 1464.48 -PREHOOK: query: -- 39. testUDFOnOrderCols -select p_mfgr, p_type, substr(p_type, 2) as short_ptype, +PREHOOK: query: select p_mfgr, p_type, substr(p_type, 2) as short_ptype, rank() over (partition by p_mfgr order by substr(p_type, 2)) as r from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 39. testUDFOnOrderCols -select p_mfgr, p_type, substr(p_type, 2) as short_ptype, +POSTHOOK: query: select p_mfgr, p_type, substr(p_type, 2) as short_ptype, rank() over (partition by p_mfgr order by substr(p_type, 2)) as r from part POSTHOOK: type: QUERY @@ -2085,15 +2003,13 @@ Manufacturer#5 LARGE BRUSHED BRASS ARGE BRUSHED BRASS 1 Manufacturer#5 MEDIUM BURNISHED TIN EDIUM BURNISHED TIN 3 Manufacturer#5 SMALL PLATED BRASS MALL PLATED BRASS 4 Manufacturer#5 STANDARD BURNISHED TIN TANDARD BURNISHED TIN 5 -PREHOOK: query: -- 40. testNoBetweenForRows -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows unbounded preceding) as s1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 40. 
testNoBetweenForRows -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows unbounded preceding) as s1 from part POSTHOOK: type: QUERY @@ -2125,15 +2041,13 @@ Manufacturer#5 almond antique medium spring khaki 6 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 7672.66 -PREHOOK: query: -- 41. testNoBetweenForRange -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_size range unbounded preceding) as s1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 41. testNoBetweenForRange -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_size range unbounded preceding) as s1 from part POSTHOOK: type: QUERY @@ -2165,15 +2079,13 @@ Manufacturer#5 almond antique medium spring khaki 6 3400.3900000000003 Manufacturer#5 almond antique sky peru orange 2 1788.73 Manufacturer#5 almond aquamarine dodger light gainsboro 46 7672.660000000002 Manufacturer#5 almond azure blanched chiffon midnight 23 4864.870000000001 -PREHOOK: query: -- 42. testUnboundedFollowingForRows -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between current row and unbounded following) as s1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 42. 
testUnboundedFollowingForRows -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between current row and unbounded following) as s1 from part POSTHOOK: type: QUERY @@ -2205,15 +2117,13 @@ Manufacturer#5 almond antique medium spring khaki 6 5882.970000000001 Manufacturer#5 almond antique sky peru orange 2 4271.3099999999995 Manufacturer#5 almond aquamarine dodger light gainsboro 46 2482.58 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 -PREHOOK: query: -- 43. testUnboundedFollowingForRange -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_size range between current row and unbounded following) as s1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 43. testUnboundedFollowingForRange -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_size range between current row and unbounded following) as s1 from part POSTHOOK: type: QUERY @@ -2245,16 +2155,14 @@ Manufacturer#5 almond antique medium spring khaki 6 5883.93 Manufacturer#5 almond antique sky peru orange 2 7672.660000000002 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 Manufacturer#5 almond azure blanched chiffon midnight 23 4272.27 -PREHOOK: query: -- 44. testOverNoPartitionSingleAggregate -select p_name, p_retailprice, +PREHOOK: query: select p_name, p_retailprice, round(avg(p_retailprice) over(),2) from part order by p_name PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 44. 
testOverNoPartitionSingleAggregate -select p_name, p_retailprice, +POSTHOOK: query: select p_name, p_retailprice, round(avg(p_retailprice) over(),2) from part order by p_name @@ -2287,32 +2195,28 @@ almond aquamarine sandy cyan gainsboro 1701.6 1546.78 almond aquamarine yellow dodger mint 1844.92 1546.78 almond azure aquamarine papaya violet 1290.35 1546.78 almond azure blanched chiffon midnight 1464.48 1546.78 -PREHOOK: query: -- 45. empty partition test -select p_mfgr, +PREHOOK: query: select p_mfgr, sum(p_size) over (partition by p_mfgr order by p_size rows between unbounded preceding and current row) from part where p_mfgr = 'Manufacturer#6' PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 45. empty partition test -select p_mfgr, +POSTHOOK: query: select p_mfgr, sum(p_size) over (partition by p_mfgr order by p_size rows between unbounded preceding and current row) from part where p_mfgr = 'Manufacturer#6' POSTHOOK: type: QUERY POSTHOOK: Input: default@part #### A masked pattern was here #### -PREHOOK: query: -- 46. window sz is same as partition sz -select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), +PREHOOK: query: select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) from part where p_mfgr='Manufacturer#1' PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 46. 
window sz is same as partition sz -select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), +POSTHOOK: query: select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) from part where p_mfgr='Manufacturer#1' @@ -2325,14 +2229,12 @@ POSTHOOK: Input: default@part 1602.59 1549.8900000000003 4649.670000000001 1632.66 1632.6600000000008 1632.6600000000008 1753.76 1600.8575000000003 6403.430000000001 -PREHOOK: query: -- 47. empty partition -select sum(p_size) over (partition by p_mfgr ) +PREHOOK: query: select sum(p_size) over (partition by p_mfgr ) from part where p_mfgr = 'm1' PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 47. empty partition -select sum(p_size) over (partition by p_mfgr ) +POSTHOOK: query: select sum(p_size) over (partition by p_mfgr ) from part where p_mfgr = 'm1' POSTHOOK: type: QUERY POSTHOOK: Input: default@part diff --git a/ql/src/test/results/clientpositive/llap/cbo_semijoin.q.out b/ql/src/test/results/clientpositive/llap/cbo_semijoin.q.out index bdd8125..6f79549 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_semijoin.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_semijoin.q.out @@ -1,13 +1,11 @@ -PREHOOK: query: -- 12. SemiJoin -select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 12. 
SemiJoin -select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_simple_select.q.out b/ql/src/test/results/clientpositive/llap/cbo_simple_select.q.out index d161d9f..2e06e61 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_simple_select.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_simple_select.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 1. Test Select + TS -select * from cbo_t1 +PREHOOK: query: select * from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 1. Test Select + TS -select * from cbo_t1 +POSTHOOK: query: select * from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -130,14 +128,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -PREHOOK: query: -- 2. Test Select + TS + FIL -select * from cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: query: select * from cbo_t1 where cbo_t1.c_int >= 0 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 2. 
Test Select + TS + FIL -select * from cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: query: select * from cbo_t1 where cbo_t1.c_int >= 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -244,14 +240,12 @@ POSTHOOK: Input: default@cbo_t1@dt=2014 1 1 25.0 1 1 25.0 1 1 25.0 -PREHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -694,13 +688,11 @@ POSTHOOK: Input: default@cbo_t1@dt=2014 2.0 1 25.0 2.0 1 25.0 2.0 1 25.0 -PREHOOK: query: -- 13. null expr in select list -select null from cbo_t3 +PREHOOK: query: select null from cbo_t3 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t3 #### A masked pattern was here #### -POSTHOOK: query: -- 13. null expr in select list -select null from cbo_t3 +POSTHOOK: query: select null from cbo_t3 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t3 #### A masked pattern was here #### @@ -724,28 +716,24 @@ NULL NULL NULL NULL -PREHOOK: query: -- 14. unary operator -select key from cbo_t1 where c_int = -6 or c_int = +6 +PREHOOK: query: select key from cbo_t1 where c_int = -6 or c_int = +6 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 14. 
unary operator -select key from cbo_t1 where c_int = -6 or c_int = +6 +POSTHOOK: query: select key from cbo_t1 where c_int = -6 or c_int = +6 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -PREHOOK: query: -- 15. query referencing only partition columns -select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +PREHOOK: query: select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 15. query referencing only partition columns -select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +POSTHOOK: query: select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_stats.q.out b/ql/src/test/results/clientpositive/llap/cbo_stats.q.out index 554a8f0..3747d31 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_stats.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_stats.q.out @@ -1,12 +1,10 @@ -PREHOOK: query: -- 20. Test get stats with empty partition list -select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +PREHOOK: query: select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 20. 
Test get stats with empty partition list -select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +POSTHOOK: query: select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t2 diff --git a/ql/src/test/results/clientpositive/llap/cbo_subq_exists.q.out b/ql/src/test/results/clientpositive/llap/cbo_subq_exists.q.out index 50bfbe2..45f4524 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_subq_exists.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_subq_exists.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * +PREHOOK: query: select * from src_cbo b where not exists (select distinct a.key @@ -10,9 +8,7 @@ where not exists PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 18. SubQueries Not Exists --- distinct, corr -select * +POSTHOOK: query: select * from src_cbo b where not exists (select distinct a.key @@ -141,8 +137,7 @@ POSTHOOK: Input: default@src_cbo 199 val_199 199 val_199 2 val_2 -PREHOOK: query: -- no agg, corr, having -select * +PREHOOK: query: select * from src_cbo b group by key, value having not exists @@ -153,8 +148,7 @@ having not exists PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- no agg, corr, having -select * +POSTHOOK: query: select * from src_cbo b group by key, value having not exists @@ -179,9 +173,7 @@ POSTHOOK: Input: default@src_cbo 118 val_118 119 val_119 12 val_12 -PREHOOK: query: -- 19. 
SubQueries Exists --- view test -create view cv1 as +PREHOOK: query: create view cv1 as select * from src_cbo b where exists @@ -192,9 +184,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src_cbo PREHOOK: Output: database:default PREHOOK: Output: default@cv1 -POSTHOOK: query: -- 19. SubQueries Exists --- view test -create view cv1 as +POSTHOOK: query: create view cv1 as select * from src_cbo b where exists @@ -226,8 +216,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- sq in from -select * +PREHOOK: query: select * from (select * from src_cbo b where exists @@ -238,8 +227,7 @@ from (select * PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- sq in from -select * +POSTHOOK: query: select * from (select * from src_cbo b where exists @@ -261,8 +249,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- sq in from, having -select * +PREHOOK: query: select * from (select b.key, count(*) from src_cbo b group by b.key @@ -275,8 +262,7 @@ from (select b.key, count(*) PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- sq in from, having -select * +POSTHOOK: query: select * from (select b.key, count(*) from src_cbo b group by b.key diff --git a/ql/src/test/results/clientpositive/llap/cbo_subq_in.q.out b/ql/src/test/results/clientpositive/llap/cbo_subq_in.q.out index f6bfad2..39a4f8a 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_subq_in.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_subq_in.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- 17. SubQueries In --- non agg, non corr -select * +PREHOOK: query: select * from src_cbo where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 17. 
SubQueries In --- non agg, non corr -select * +POSTHOOK: query: select * from src_cbo where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key POSTHOOK: type: QUERY @@ -25,11 +21,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * +PREHOOK: query: select * from src_cbo b where b.key in (select distinct a.key @@ -39,11 +31,7 @@ where b.key in PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * +POSTHOOK: query: select * from src_cbo b where b.key in (select distinct a.key @@ -64,8 +52,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey +PREHOOK: query: select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) @@ -73,8 +60,7 @@ where li.l_linenumber = 1 and PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey +POSTHOOK: query: select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) @@ -84,13 +70,7 @@ POSTHOOK: Input: default@lineitem #### A masked pattern was here #### 4297 1798 108570 8571 -PREHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- 
Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) +PREHOOK: query: select key, value, count(*) from src_cbo b where b.key in (select key from src_cbo where src_cbo.key > '8') group by key, value @@ -98,13 +78,7 @@ having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) +POSTHOOK: query: select key, value, count(*) from src_cbo b where b.key in (select key from src_cbo where src_cbo.key > '8') group by key, value @@ -126,8 +100,7 @@ POSTHOOK: Input: default@src_cbo 96 val_96 1 97 val_97 2 98 val_98 2 -PREHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) +PREHOOK: query: select p_mfgr, p_name, avg(p_size) from part group by p_mfgr, p_name having p_name in @@ -135,8 +108,7 @@ having p_name in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) +POSTHOOK: query: select p_mfgr, p_name, avg(p_size) from part group by p_mfgr, p_name having p_name in diff --git a/ql/src/test/results/clientpositive/llap/cbo_subq_not_in.q.out b/ql/src/test/results/clientpositive/llap/cbo_subq_not_in.q.out index c7274f7..c006d11 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_subq_not_in.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_subq_not_in.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 16. 
SubQueries Not In --- non agg, non corr -select * +PREHOOK: query: select * from src_cbo where src_cbo.key not in ( select key from src_cbo s1 @@ -9,9 +7,7 @@ where src_cbo.key not in PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 16. SubQueries Not In --- non agg, non corr -select * +POSTHOOK: query: select * from src_cbo where src_cbo.key not in ( select key from src_cbo s1 @@ -139,8 +135,7 @@ POSTHOOK: Input: default@src_cbo 199 val_199 199 val_199 2 val_2 -PREHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size +PREHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name not in (select p_name @@ -150,8 +145,7 @@ where b.p_name not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size +POSTHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name not in (select p_name @@ -179,8 +173,7 @@ Manufacturer#4 almond antique violet mint lemon 39 Manufacturer#5 almond azure blanched chiffon midnight 23 Manufacturer#5 almond antique blue firebrick mint 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 -PREHOOK: query: -- agg, non corr -select p_name, p_size +PREHOOK: query: select p_name, p_size from part where part.p_size not in (select avg(p_size) @@ -190,8 +183,7 @@ part where part.p_size not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, non corr -select p_name, p_size +POSTHOOK: query: select p_name, p_size from part where part.p_size not in (select avg(p_size) @@ -227,8 +219,7 @@ almond aquamarine sandy cyan gainsboro 18 almond aquamarine yellow dodger mint 7 almond azure aquamarine papaya violet 12 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- agg, corr -select p_mfgr, p_name, p_size +PREHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in 
(select min(p_size) from (select p_mfgr, p_size from part) a @@ -237,8 +228,7 @@ from part b where b.p_size not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, corr -select p_mfgr, p_name, p_size +POSTHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) from (select p_mfgr, p_size from part) a @@ -267,8 +257,7 @@ Manufacturer#2 almond aquamarine rose maroon antique 25 Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 Manufacturer#4 almond azure aquamarine papaya violet 12 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) +PREHOOK: query: select li.l_partkey, count(*) from lineitem li where li.l_linenumber = 1 and li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') @@ -276,8 +265,7 @@ group by li.l_partkey order by li.l_partkey PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) +POSTHOOK: query: select li.l_partkey, count(*) from lineitem li where li.l_linenumber = 1 and li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') @@ -301,10 +289,7 @@ POSTHOOK: Input: default@lineitem 139636 1 175839 1 182052 1 -PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. - --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) +PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -316,10 +301,7 @@ having b.p_mfgr not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. 
- --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) +POSTHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -333,8 +315,7 @@ POSTHOOK: Input: default@part #### A masked pattern was here #### Manufacturer#1 1173.15 Manufacturer#2 1690.68 -PREHOOK: query: -- agg, non corr, having -select b.p_mfgr, min(p_retailprice) +PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -347,8 +328,7 @@ having b.p_mfgr not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, non corr, having -select b.p_mfgr, min(p_retailprice) +POSTHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in diff --git a/ql/src/test/results/clientpositive/llap/cbo_udf_udaf.q.out b/ql/src/test/results/clientpositive/llap/cbo_udf_udaf.q.out index 156d02f..696d320 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_udf_udaf.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_udf_udaf.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 8. Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 8. 
Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_union.q.out b/ql/src/test/results/clientpositive/llap/cbo_union.q.out index fb86d22..f6f36f6 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_union.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_union.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 11. Union All -select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b +PREHOOK: query: select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 11. Union All -select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b +POSTHOOK: query: select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/cbo_views.q.out b/ql/src/test/results/clientpositive/llap/cbo_views.q.out index 4a7b935..e412ee1 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_views.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_views.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 10. 
Test views -create view v1 as select c_int, value, c_boolean, dt from cbo_t1 +PREHOOK: query: create view v1 as select c_int, value, c_boolean, dt from cbo_t1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@cbo_t1 PREHOOK: Output: database:default PREHOOK: Output: default@v1 -POSTHOOK: query: -- 10. Test views -create view v1 as select c_int, value, c_boolean, dt from cbo_t1 +POSTHOOK: query: create view v1 as select c_int, value, c_boolean, dt from cbo_t1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@cbo_t1 POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/llap/cbo_windowing.q.out b/ql/src/test/results/clientpositive/llap/cbo_windowing.q.out index 52b584a..f1913d7 100644 --- a/ql/src/test/results/clientpositive/llap/cbo_windowing.q.out +++ b/ql/src/test/results/clientpositive/llap/cbo_windowing.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- 9. Test Windowing Functions --- SORT_QUERY_RESULTS - -select count(c_int) over() from cbo_t1 +PREHOOK: query: select count(c_int) over() from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 9. 
Test Windowing Functions --- SORT_QUERY_RESULTS - -select count(c_int) over() from cbo_t1 +POSTHOOK: query: select count(c_int) over() from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/llap/column_access_stats.q.out b/ql/src/test/results/clientpositive/llap/column_access_stats.q.out index 5a66b0d..c56c818 100644 --- a/ql/src/test/results/clientpositive/llap/column_access_stats.q.out +++ b/ql/src/test/results/clientpositive/llap/column_access_stats.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This test is used for testing the ColumnAccessAnalyzer - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 @@ -21,8 +18,7 @@ PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) PARTITIONED BY (p STRING PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T4 -PREHOOK: query: -- Simple select queries -SELECT key FROM T1 +PREHOOK: query: SELECT key FROM T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### @@ -79,8 +75,7 @@ PREHOOK: Input: default@t4 Table:default@t4 Columns:p,val -PREHOOK: query: -- More complicated select queries -EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1 +PREHOOK: query: EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1 PREHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -166,8 +161,7 @@ Columns:key,val 24.0 26.0 36.0 -PREHOOK: query: -- Work with union -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM ( SELECT key as c FROM T1 UNION ALL @@ -343,8 +337,7 @@ Columns:key 8 8 8 -PREHOOK: query: -- Work with insert overwrite -FROM T1 +PREHOOK: query: FROM T1 INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP 
BY key PREHOOK: type: QUERY @@ -354,8 +347,7 @@ PREHOOK: Output: default@t3 Table:default@t1 Columns:key,val -PREHOOK: query: -- Simple joins -SELECT * +PREHOOK: query: SELECT * FROM T1 JOIN T2 ON T1.key = T2.key PREHOOK: type: QUERY @@ -486,8 +478,7 @@ Columns:key,val Table:default@t2 Columns:key,val -PREHOOK: query: -- Map join -SELECT /*+ MAPJOIN(a) */ * +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY @@ -506,8 +497,7 @@ Columns:key,val 7 17 7 1 8 18 8 2 8 28 8 2 -PREHOOK: query: -- More joins -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM T1 JOIN T2 ON T1.key = T2.key AND T1.val = 3 and T2.val = 3 @@ -709,8 +699,7 @@ Columns:key,val Table:default@t2 Columns:key,val -PREHOOK: query: -- Join followed by join -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM ( @@ -876,8 +865,7 @@ Columns:key,val 7 7 17.0 8 8 46.0 8 8 46.0 -PREHOOK: query: -- for partitioned table -SELECT * FROM srcpart TABLESAMPLE (10 ROWS) +PREHOOK: query: SELECT * FROM srcpart TABLESAMPLE (10 ROWS) PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/llap/columnstats_part_coltype.q.out b/ql/src/test/results/clientpositive/llap/columnstats_part_coltype.q.out index d52f020..3e28e58 100644 --- a/ql/src/test/results/clientpositive/llap/columnstats_part_coltype.q.out +++ b/ql/src/test/results/clientpositive/llap/columnstats_part_coltype.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- Test type date, int, and string in partition column -drop table if exists partcolstats +PREHOOK: query: drop table if exists partcolstats PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Test type date, int, and string in partition column -drop table if exists partcolstats +POSTHOOK: query: drop table if exists partcolstats POSTHOOK: type: DROPTABLE PREHOOK: query: create table partcolstats (key int, value string) partitioned by (ds date, hr int, part string) PREHOOK: type: 
CREATETABLE @@ -286,11 +284,9 @@ POSTHOOK: query: drop table partcolstats POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@partcolstats POSTHOOK: Output: default@partcolstats -PREHOOK: query: -- Test type tinyint, smallint, and bigint in partition column -drop table if exists partcolstatsnum +PREHOOK: query: drop table if exists partcolstatsnum PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Test type tinyint, smallint, and bigint in partition column -drop table if exists partcolstatsnum +POSTHOOK: query: drop table if exists partcolstatsnum POSTHOOK: type: DROPTABLE PREHOOK: query: create table partcolstatsnum (key int, value string) partitioned by (tint tinyint, sint smallint, bint bigint) PREHOOK: type: CREATETABLE @@ -337,11 +333,9 @@ POSTHOOK: query: drop table partcolstatsnum POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@partcolstatsnum POSTHOOK: Output: default@partcolstatsnum -PREHOOK: query: -- Test type decimal in partition column -drop table if exists partcolstatsdec +PREHOOK: query: drop table if exists partcolstatsdec PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Test type decimal in partition column -drop table if exists partcolstatsdec +POSTHOOK: query: drop table if exists partcolstatsdec POSTHOOK: type: DROPTABLE PREHOOK: query: create table partcolstatsdec (key int, value string) partitioned by (decpart decimal(8,4)) PREHOOK: type: CREATETABLE @@ -388,11 +382,9 @@ POSTHOOK: query: drop table partcolstatsdec POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@partcolstatsdec POSTHOOK: Output: default@partcolstatsdec -PREHOOK: query: -- Test type varchar and char in partition column -drop table if exists partcolstatschar +PREHOOK: query: drop table if exists partcolstatschar PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Test type varchar and char in partition column -drop table if exists partcolstatschar +POSTHOOK: query: drop table if exists partcolstatschar POSTHOOK: type: DROPTABLE PREHOOK: query: create table partcolstatschar (key int, value 
string) partitioned by (varpart varchar(5), charpart char(3)) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out index 5702c6e..74a430d 100644 --- a/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out +++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer1.q.out @@ -1,20 +1,10 @@ -PREHOOK: query: -- This query has a GroupByOperator folling JoinOperator and they share the same keys. --- When Correlation Optimizer is turned off, three MR jobs will be generated. --- When Correlation Optimizer is turned on, two MR jobs will be generated --- and JoinOperator (on the column of key) and GroupByOperator (also on the column --- of key) will be executed in the first MR job. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) GROUP BY x.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- This query has a GroupByOperator folling JoinOperator and they share the same keys. --- When Correlation Optimizer is turned off, three MR jobs will be generated. --- When Correlation Optimizer is turned on, two MR jobs will be generated --- and JoinOperator (on the column of key) and GroupByOperator (also on the column --- of key) will be executed in the first MR job. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) @@ -304,21 +294,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 37 -PREHOOK: query: -- Enable hive.auto.convert.join. --- Correlation Optimizer will detect that the join will be converted to a Map-join, --- so it will not try to optimize this query. --- We should generate 1 MR job for subquery tmp. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) GROUP BY x.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Enable hive.auto.convert.join. --- Correlation Optimizer will detect that the join will be converted to a Map-join, --- so it will not try to optimize this query. --- We should generate 1 MR job for subquery tmp. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) @@ -454,19 +436,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 37 -PREHOOK: query: -- If the key of a GroupByOperator is the left table's key in --- a Left Semi Join, these two operators will be executed in --- the same MR job when Correlation Optimizer is enabled. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key) GROUP BY x.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- If the key of a GroupByOperator is the left table's key in --- a Left Semi Join, these two operators will be executed in --- the same MR job when Correlation Optimizer is enabled. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x LEFT SEMI JOIN src y ON (x.key = y.key) @@ -766,19 +742,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 15 -PREHOOK: query: -- If the key of a GroupByOperator is the left table's key in --- a Left Outer Join, these two operators will be executed in --- the same MR job when Correlation Optimizer is enabled. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key) GROUP BY x.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- If the key of a GroupByOperator is the left table's key in --- a Left Outer Join, these two operators will be executed in --- the same MR job when Correlation Optimizer is enabled. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key) @@ -1056,19 +1026,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 47 -PREHOOK: query: -- If the key of a GroupByOperator is the right table's key in --- a Left Outer Join, we cannot use a single MR to execute these two --- operators because those keys with a null value are not grouped. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key) GROUP BY y.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- If the key of a GroupByOperator is the right table's key in --- a Left Outer Join, we cannot use a single MR to execute these two --- operators because those keys with a null value are not grouped. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key) @@ -1354,18 +1318,12 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 47 -PREHOOK: query: -- If a column of the key of a GroupByOperator is the right table's key in --- a Left Outer Join, we cannot use a single MR to execute these two --- operators because those keys with a null value are not grouped. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.key, y.value, count(1) AS cnt FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value) GROUP BY x.key, y.value PREHOOK: type: QUERY -POSTHOOK: query: -- If a column of the key of a GroupByOperator is the right table's key in --- a Left Outer Join, we cannot use a single MR to execute these two --- operators because those keys with a null value are not grouped. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.key, y.value, count(1) AS cnt FROM src1 x LEFT OUTER JOIN src y ON (x.key = y.key AND x.value = y.value) GROUP BY x.key, y.value @@ -1630,19 +1588,13 @@ POSTHOOK: Input: default@src1 406 val_406 4 66 val_66 1 98 val_98 2 -PREHOOK: query: -- If the key of a GroupByOperator is the right table's key in --- a Right Outer Join, these two operators will be executed in --- the same MR job when Correlation Optimizer is enabled. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key) GROUP BY y.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- If the key of a GroupByOperator is the right table's key in --- a Right Outer Join, these two operators will be executed in --- the same MR job when Correlation Optimizer is enabled. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key) @@ -1928,19 +1880,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 12744278 500 -PREHOOK: query: -- If the key of a GroupByOperator is the left table's key in --- a Right Outer Join, we cannot use a single MR to execute these two --- operators because those keys with a null value are not grouped. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key) GROUP BY x.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- If the key of a GroupByOperator is the left table's key in --- a Right Outer Join, we cannot use a single MR to execute these two --- operators because those keys with a null value are not grouped. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x RIGHT OUTER JOIN src y ON (x.key = y.key) @@ -2218,21 +2164,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 500 -PREHOOK: query: -- This query has a Full Outer Join followed by a GroupByOperator and --- they share the same key. Because those keys with a null value are not grouped --- in the output of the Full Outer Join, we cannot use a single MR to execute --- these two operators. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x FULL OUTER JOIN src y ON (x.key = y.key) GROUP BY x.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- This query has a Full Outer Join followed by a GroupByOperator and --- they share the same key. Because those keys with a null value are not grouped --- in the output of the Full Outer Join, we cannot use a single MR to execute --- these two operators. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x FULL OUTER JOIN src y ON (x.key = y.key) @@ -2510,15 +2448,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 510 -PREHOOK: query: -- Currently, we only handle exactly same keys, this query will not be optimized -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.value)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, x.value AS value, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) GROUP BY x.key, x.value) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Currently, we only handle exactly same keys, this query will not be optimized -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.value)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, x.value AS value, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) @@ -2810,15 +2746,13 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 661329102 37 -PREHOOK: query: -- Currently, we only handle exactly same keys, this query will not be optimized -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key AND x.value = y.value) GROUP BY x.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Currently, we only handle exactly same keys, this query will not be optimized -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key AND x.value = y.value) diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer2.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer2.q.out index dfb4804..1711d55 100644 --- a/ql/src/test/results/clientpositive/llap/correlationoptimizer2.q.out +++ 
b/ql/src/test/results/clientpositive/llap/correlationoptimizer2.q.out @@ -1,23 +1,11 @@ -PREHOOK: query: -- In this query, subquery a and b both have a GroupByOperator and the a and b will be --- joined. The key of JoinOperator is the same with both keys of GroupByOperators in subquery --- a and b. When Correlation Optimizer is turned off, we have four MR jobs. --- When Correlation Optimizer is turned on, 2 MR jobs will be generated. --- The first job will evaluate subquery tmp (including subquery a, b, and the JoinOperator on a --- and b). -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a JOIN (SELECT y.key as key, count(y.value) AS cnt FROM src1 y group by y.key) b ON (a.key = b.key)) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- In this query, subquery a and b both have a GroupByOperator and the a and b will be --- joined. The key of JoinOperator is the same with both keys of GroupByOperators in subquery --- a and b. When Correlation Optimizer is turned off, we have four MR jobs. --- When Correlation Optimizer is turned on, 2 MR jobs will be generated. --- The first job will evaluate subquery tmp (including subquery a, b, and the JoinOperator on a --- and b). -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a @@ -346,16 +334,14 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 37 652447 15 -PREHOOK: query: -- Left Outer Join should be handled. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a LEFT OUTER JOIN (SELECT y.key as key, count(y.value) AS cnt FROM src1 y group by y.key) b ON (a.key = b.key)) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Left Outer Join should be handled. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a @@ -680,16 +666,14 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 12744278 500 652447 15 -PREHOOK: query: -- Right Outer Join should be handled. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a RIGHT OUTER JOIN (SELECT y.key as key, count(y.value) AS cnt FROM src1 y group by y.key) b ON (a.key = b.key)) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Right Outer Join should be handled. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a @@ -1014,16 +998,14 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 652447 37 652447 25 -PREHOOK: query: -- Full Outer Join should be handled. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a FULL OUTER JOIN (SELECT y.key as key, count(y.value) AS cnt FROM src1 y group by y.key) b ON (a.key = b.key)) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Full Outer Join should be handled. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.cnt AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a @@ -1544,10 +1526,7 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 12744278 310 -PREHOOK: query: -- After FULL OUTER JOIN, keys with null values are not grouped, right now, --- we have to generate 2 MR jobs for tmp, 1 MR job for a join b and another for the --- GroupByOperator on key. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT a.key AS key, count(1) AS cnt FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a @@ -1555,10 +1534,7 @@ FROM (SELECT a.key AS key, count(1) AS cnt ON (a.key = b.key) GROUP BY a.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- After FULL OUTER JOIN, keys with null values are not grouped, right now, --- we have to generate 2 MR jobs for tmp, 1 MR job for a join b and another for the --- GroupByOperator on key. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT a.key AS key, count(1) AS cnt FROM (SELECT x.key as key, count(x.value) AS cnt FROM src x group by x.key) a @@ -1738,22 +1714,14 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 12744278 310 -PREHOOK: query: -- When Correlation Optimizer is turned off, we need 4 MR jobs. 
--- When Correlation Optimizer is turned on, the subquery of tmp will be evaluated in --- a single MR job (including the subquery a, the subquery b, and a join b). So, we --- will have 2 MR jobs. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.val AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key AS key, x.value AS val FROM src1 x JOIN src y ON (x.key = y.key)) a JOIN (SELECT z.key AS key, count(z.value) AS cnt FROM src1 z group by z.key) b ON (a.key = b.key)) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, we need 4 MR jobs. --- When Correlation Optimizer is turned on, the subquery of tmp will be evaluated in --- a single MR job (including the subquery a, the subquery b, and a join b). So, we --- will have 2 MR jobs. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(key1)), SUM(HASH(cnt1)), SUM(HASH(key2)), SUM(HASH(cnt2)) FROM (SELECT a.key AS key1, a.val AS cnt1, b.key AS key2, b.cnt AS cnt2 FROM (SELECT x.key AS key, x.value AS val FROM src1 x JOIN src y ON (x.key = y.key)) a diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer3.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer3.q.out index 74b3d6c..36edab2 100644 --- a/ql/src/test/results/clientpositive/llap/correlationoptimizer3.q.out +++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer3.q.out @@ -1,25 +1,11 @@ -PREHOOK: query: -- When Correlation Optimizer is turned off, 5 MR jobs will be generated. --- When Correlation Optimizer is turned on, the subquery tmp will be evalauted --- in a single MR job (including the subquery b, the subquery d, and b join d). --- At the reduce side of the MR job evaluating tmp, two operation paths --- (for subquery b and d) have different depths. The path starting from subquery b --- is JOIN->GBY->JOIN, which has a depth of 3. While, the path starting from subquery d --- is JOIN->JOIN. 
We should be able to handle this case. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value)) FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d ON b.key = d.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 5 MR jobs will be generated. --- When Correlation Optimizer is turned on, the subquery tmp will be evalauted --- in a single MR job (including the subquery b, the subquery d, and b join d). --- At the reduce side of the MR job evaluating tmp, two operation paths --- (for subquery b and d) have different depths. The path starting from subquery b --- is JOIN->GBY->JOIN, which has a depth of 3. While, the path starting from subquery d --- is JOIN->JOIN. We should be able to handle this case. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value)) FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b @@ -482,16 +468,14 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 1711763 107 3531902962 -PREHOOK: query: -- Enable hive.auto.convert.join. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value)) FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b JOIN (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) d ON b.key = d.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Enable hive.auto.convert.join. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value)) FROM (SELECT b.key AS key, b.cnt AS cnt, d.value AS value FROM (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) b @@ -1169,16 +1153,14 @@ POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### 1711763 107 3531902962 -PREHOOK: query: -- Enable hive.auto.convert.join. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value)) FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b JOIN (SELECT x.key, count(1) AS cnt FROM src1 x JOIN src y ON (x.key = y.key) group by x.key) d ON b.key = d.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Enable hive.auto.convert.join. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)), SUM(HASH(tmp.value)) FROM (SELECT d.key AS key, d.cnt AS cnt, b.value as value FROM (SELECT x.key, x.value FROM src1 x JOIN src y ON (x.key = y.key)) b diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out index 67e636b..7ddd136 100644 --- a/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out +++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer4.q.out @@ -46,21 +46,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- When Correlation Optimizer is turned off, this query will be evaluated --- by 3 MR jobs. --- When Correlation Optimizer is turned on, this query will be evaluated by --- 2 MR jobs. The subquery tmp will be evaluated in a single MR job. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) GROUP BY y.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, this query will be evaluated --- by 3 MR jobs. --- When Correlation Optimizer is turned on, this query will be evaluated by --- 2 MR jobs. The subquery tmp will be evaluated in a single MR job. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) @@ -404,15 +396,13 @@ POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 #### A masked pattern was here #### 13 10 -PREHOOK: query: -- Enable hive.auto.convert.join. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) GROUP BY y.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Enable hive.auto.convert.join. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x JOIN T1 y ON (x.key = y.key) JOIN T3 z ON (y.key = z.key) @@ -576,17 +566,13 @@ POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 #### A masked pattern was here #### 13 10 -PREHOOK: query: -- This case should be optimized, since the key of GroupByOperator is from the leftmost table --- of a chain of LEFT OUTER JOINs. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) GROUP BY x.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- This case should be optimized, since the key of GroupByOperator is from the leftmost table --- of a chain of LEFT OUTER JOINs. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT x.key AS key, count(1) AS cnt FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) @@ -904,19 +890,13 @@ POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 #### A masked pattern was here #### 22 12 -PREHOOK: query: -- This query will not be optimized by correlation optimizer because --- GroupByOperator uses y.key (a right table of a left outer join) --- as the key. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) GROUP BY y.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- This query will not be optimized by correlation optimizer because --- GroupByOperator uses y.key (a right table of a left outer join) --- as the key. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x LEFT OUTER JOIN T1 y ON (x.key = y.key) LEFT OUTER JOIN T3 z ON (y.key = z.key) @@ -1076,17 +1056,13 @@ POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 #### A masked pattern was here #### 13 12 -PREHOOK: query: -- This case should be optimized, since the key of GroupByOperator is from the rightmost table --- of a chain of RIGHT OUTER JOINs. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT z.key AS key, count(1) AS cnt FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) GROUP BY z.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- This case should be optimized, since the key of GroupByOperator is from the rightmost table --- of a chain of RIGHT OUTER JOINs. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT z.key AS key, count(1) AS cnt FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) @@ -1412,19 +1388,13 @@ POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 #### A masked pattern was here #### 21 12 -PREHOOK: query: -- This query will not be optimized by correlation optimizer because --- GroupByOperator uses y.key (a left table of a right outer join) --- as the key. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) GROUP BY y.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- This query will not be optimized by correlation optimizer because --- GroupByOperator uses y.key (a left table of a right outer join) --- as the key. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x RIGHT OUTER JOIN T1 y ON (x.key = y.key) RIGHT OUTER JOIN T3 z ON (y.key = z.key) @@ -1584,17 +1554,13 @@ POSTHOOK: Input: default@t2 POSTHOOK: Input: default@t3 #### A masked pattern was here #### 21 12 -PREHOOK: query: -- This case should not be optimized because afer the FULL OUTER JOIN, rows with null keys --- are not grouped. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x FULL OUTER JOIN T1 y ON (x.key = y.key) FULL OUTER JOIN T3 z ON (y.key = z.key) GROUP BY y.key) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- This case should not be optimized because afer the FULL OUTER JOIN, rows with null keys --- are not grouped. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT SUM(HASH(tmp.key)), SUM(HASH(tmp.cnt)) FROM (SELECT y.key AS key, count(1) AS cnt FROM T2 x FULL OUTER JOIN T1 y ON (x.key = y.key) FULL OUTER JOIN T3 z ON (y.key = z.key) diff --git a/ql/src/test/results/clientpositive/llap/correlationoptimizer6.q.out b/ql/src/test/results/clientpositive/llap/correlationoptimizer6.q.out index cf66d25..4fec286 100644 --- a/ql/src/test/results/clientpositive/llap/correlationoptimizer6.q.out +++ b/ql/src/test/results/clientpositive/llap/correlationoptimizer6.q.out @@ -1,9 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- When Correlation Optimizer is turned off, 6 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx, subquery yy, and xx join yy. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.cnt FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx @@ -11,12 +6,7 @@ JOIN (SELECT x.key as key, count(1) as cnt FROM src x JOIN src y ON (x.key = y.key) group by x.key) yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- When Correlation Optimizer is turned off, 6 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx, subquery yy, and xx join yy. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.cnt FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx @@ -496,8 +486,7 @@ POSTHOOK: Input: default@src1 406 1 406 16 66 1 66 1 98 1 98 4 -PREHOOK: query: -- Enable hive.auto.convert.join. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.cnt FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx @@ -505,8 +494,7 @@ JOIN (SELECT x.key as key, count(1) as cnt FROM src x JOIN src y ON (x.key = y.key) group by x.key) yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- Enable hive.auto.convert.join. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.cnt FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx @@ -722,20 +710,14 @@ POSTHOOK: Input: default@src1 406 1 406 16 66 1 66 1 98 1 98 4 -PREHOOK: query: -- When Correlation Optimizer is turned off, 3 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery yy and xx join yy. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN (SELECT x.key as key, count(1) as cnt FROM src x GROUP BY x.key) yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 3 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery yy and xx join yy. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN @@ -1018,20 +1000,14 @@ POSTHOOK: Input: default@src1 406 406 4 66 66 1 98 98 2 -PREHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery yy and xx join yy. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN (SELECT x.key as key, count(1) as cnt FROM src x JOIN src y ON (x.key = y.key) group by x.key) yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. 
--- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery yy and xx join yy. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN @@ -1386,20 +1362,14 @@ POSTHOOK: Input: default@src1 406 406 16 66 66 1 98 98 4 -PREHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx JOIN src yy ON xx.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx @@ -1798,10 +1768,7 @@ POSTHOOK: Input: default@src1 66 1 66 98 1 98 98 1 98 -PREHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy join zz. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN src zz ON xx.key=zz.key @@ -1809,10 +1776,7 @@ JOIN (SELECT x.key as key, count(1) as cnt FROM src x JOIN src y ON (x.key = y.key) group by x.key) yy ON zz.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery xx and xx join yy join zz. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN src zz ON xx.key=zz.key @@ -2036,10 +2000,7 @@ POSTHOOK: Input: default@src1 66 66 1 98 98 4 98 98 4 -PREHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery yy and xx join yy join zz. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN src zz ON xx.key=zz.key @@ -2047,10 +2008,7 @@ JOIN (SELECT x.key as key, count(1) as cnt FROM src x JOIN src y ON (x.key = y.key) group by x.key) yy ON zz.key=yy.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery yy and xx join yy join zz. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN src zz ON xx.key=zz.key @@ -2274,10 +2232,7 @@ POSTHOOK: Input: default@src1 66 66 1 98 98 4 98 98 4 -PREHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery yy and xx join yy join zz. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN @@ -2285,10 +2240,7 @@ JOIN ON xx.key=yy.key JOIN src zz ON yy.key=zz.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 4 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery yy and xx join yy join zz. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, yy.key, yy.cnt FROM src1 xx JOIN @@ -2744,10 +2696,7 @@ POSTHOOK: Input: default@src1 66 66 1 98 98 4 98 98 4 -PREHOOK: query: -- When Correlation Optimizer is turned off, 6 MR jobs are needed. 
--- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery tmp and tmp join z. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT tmp.key, tmp.sum1, tmp.sum2, z.key, z.value FROM (SELECT xx.key as key, sum(xx.cnt) as sum1, sum(yy.cnt) as sum2 @@ -2756,10 +2705,7 @@ FROM ON (xx.key=yy.key) GROUP BY xx.key) tmp JOIN src z ON tmp.key=z.key PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 6 MR jobs are needed. --- When Correlation Optimizer is turned on, 2 MR jobs are needed. --- The first job will evaluate subquery tmp and tmp join z. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT tmp.key, tmp.sum1, tmp.sum2, z.key, z.value FROM (SELECT xx.key as key, sum(xx.cnt) as sum1, sum(yy.cnt) as sum2 @@ -3258,11 +3204,7 @@ POSTHOOK: Input: default@src1 66 1 1 66 val_66 98 2 1 98 val_98 98 2 1 98 val_98 -PREHOOK: query: -- When Correlation Optimizer is turned off, 6 MR jobs are needed. --- When Correlation Optimizer is turned on, 4 MR jobs are needed. --- 2 MR jobs are used to evaluate yy, 1 MR is used to evaluate xx and xx join yy. --- The last MR is used for ordering. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.value, yy.cnt FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx @@ -3270,11 +3212,7 @@ JOIN (SELECT x.key as key, x.value as value, count(1) as cnt FROM src x JOIN src y ON (x.key = y.key) group by x.key, x.value) yy ON xx.key=yy.key ORDER BY xx.key, xx.cnt, yy.key, yy.value, yy.cnt PREHOOK: type: QUERY -POSTHOOK: query: -- When Correlation Optimizer is turned off, 6 MR jobs are needed. --- When Correlation Optimizer is turned on, 4 MR jobs are needed. --- 2 MR jobs are used to evaluate yy, 1 MR is used to evaluate xx and xx join yy. --- The last MR is used for ordering. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT xx.key, xx.cnt, yy.key, yy.value, yy.cnt FROM (SELECT x.key as key, count(1) as cnt FROM src1 x JOIN src1 y ON (x.key = y.key) group by x.key) xx diff --git a/ql/src/test/results/clientpositive/llap/count.q.out b/ql/src/test/results/clientpositive/llap/count.q.out index 5c38049..2953718 100644 --- a/ql/src/test/results/clientpositive/llap/count.q.out +++ b/ql/src/test/results/clientpositive/llap/count.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -create table abcd (a int, b int, c int, d int) +PREHOOK: query: create table abcd (a int, b int, c int, d int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@abcd -POSTHOOK: query: -- SORT_QUERY_RESULTS -create table abcd (a int, b int, c int, d int) +POSTHOOK: query: create table abcd (a int, b int, c int, d int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@abcd @@ -304,11 +302,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@abcd #### A masked pattern was here #### 7 7 6 6 6 7 3 3 6 7 4 5 6 6 5 6 4 5 5 5 4 -PREHOOK: query: --first aggregation with literal. 
gbinfo was generating wrong expression -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd PREHOOK: type: QUERY -POSTHOOK: query: --first aggregation with literal. gbinfo was generating wrong expression -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -710,11 +706,9 @@ POSTHOOK: Input: default@abcd 1 1 1 1 1 1 -PREHOOK: query: --non distinct aggregate with same column as group by 
key -explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a +PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a PREHOOK: type: QUERY -POSTHOOK: query: --non distinct aggregate with same column as group by key -explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a +POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -780,11 +774,9 @@ POSTHOOK: Input: default@abcd 100 1 1 3 6 9 100 10 100 100 100 12 1 2 9 18 27 100 155 24 12 100 NULL 1 1 6 12 18 35 23 NULL NULL 35 -PREHOOK: query: --non distinct aggregate with same column as distinct aggregate -explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a +PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a PREHOOK: type: QUERY -POSTHOOK: query: --non distinct aggregate with same column as distinct aggregate -explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a +POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -850,11 +842,9 @@ POSTHOOK: Input: default@abcd 100 1 1 3 10 12 1 2 9 155 NULL 1 1 6 23 -PREHOOK: query: --aggregation with literal -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), 
count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd PREHOOK: type: QUERY -POSTHOOK: query: --aggregation with literal -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/cross_join.q.out b/ql/src/test/results/clientpositive/llap/cross_join.q.out index 8578dbf..c0da4ec 100644 --- a/ql/src/test/results/clientpositive/llap/cross_join.q.out +++ b/ql/src/test/results/clientpositive/llap/cross_join.q.out @@ -1,9 +1,7 @@ Warning: Shuffle Join MERGEJOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product 
-PREHOOK: query: -- current -explain select src.key from src join src src2 +PREHOOK: query: explain select src.key from src join src src2 PREHOOK: type: QUERY -POSTHOOK: query: -- current -explain select src.key from src join src src2 +POSTHOOK: query: explain select src.key from src join src src2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -70,11 +68,9 @@ STAGE PLANS: ListSink Warning: Shuffle Join MERGEJOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: -- ansi cross join -explain select src.key from src cross join src src2 +PREHOOK: query: explain select src.key from src cross join src src2 PREHOOK: type: QUERY -POSTHOOK: query: -- ansi cross join -explain select src.key from src cross join src src2 +POSTHOOK: query: explain select src.key from src cross join src src2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -140,11 +136,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- appending condition is allowed -explain select src.key from src cross join src src2 on src.key=src2.key +PREHOOK: query: explain select src.key from src cross join src src2 on src.key=src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- appending condition is allowed -explain select src.key from src cross join src src2 on src.key=src2.key +POSTHOOK: query: explain select src.key from src cross join src src2 on src.key=src2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/llap/cross_product_check_1.q.out index be303c4..120f8b6 100644 --- a/ql/src/test/results/clientpositive/llap/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/llap/cross_product_check_1.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: 
Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@A -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/llap/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/llap/cross_product_check_2.q.out index 3b14b84..c865788 100644 --- a/ql/src/test/results/clientpositive/llap/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/llap/cross_product_check_2.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@A -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/llap/ctas.q.out b/ql/src/test/results/clientpositive/llap/ctas.q.out index e2e7640..fe492e4 100644 --- a/ql/src/test/results/clientpositive/llap/ctas.q.out +++ b/ql/src/test/results/clientpositive/llap/ctas.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) --- SORT_QUERY_RESULTS - -create table nzhang_Tmp(a int, b string) +PREHOOK: query: create table nzhang_Tmp(a int, b string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_Tmp -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) --- SORT_QUERY_RESULTS - -create table nzhang_Tmp(a int, b string) +POSTHOOK: query: create table nzhang_Tmp(a int, b string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_Tmp diff --git a/ql/src/test/results/clientpositive/llap/cte_1.q.out b/ql/src/test/results/clientpositive/llap/cte_1.q.out index 
7641da3..1e6a438 100644 --- a/ql/src/test/results/clientpositive/llap/cte_1.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_1.q.out @@ -40,13 +40,11 @@ POSTHOOK: Input: default@src 5 5 5 -PREHOOK: query: -- in subquery -explain +PREHOOK: query: explain with q1 as ( select key from src where key = '5') select * from (select key from q1) a PREHOOK: type: QUERY -POSTHOOK: query: -- in subquery -explain +POSTHOOK: query: explain with q1 as ( select key from src where key = '5') select * from (select key from q1) a POSTHOOK: type: QUERY @@ -80,14 +78,12 @@ POSTHOOK: Input: default@src 5 5 5 -PREHOOK: query: -- chaining -explain +PREHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') select * from (select key from q1) a PREHOOK: type: QUERY -POSTHOOK: query: -- chaining -explain +POSTHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') select * from (select key from q1) a @@ -12906,16 +12902,14 @@ NULL y605nF0K3mMoM75j NULL 1073418988 s1Tij71BKtw43u -11535.0 1073680599 NULL NULL 1073680599 pWxC5d20ub50yq8EJ8qpQ4h NULL -PREHOOK: query: --standard rollup syntax -with q1 as (select * from alltypesorc) +PREHOOK: query: with q1 as (select * from alltypesorc) from q1 select cint, cstring1, avg(csmallint) group by rollup (cint, cstring1) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: --standard rollup syntax -with q1 as (select * from alltypesorc) +POSTHOOK: query: with q1 as (select * from alltypesorc) from q1 select cint, cstring1, avg(csmallint) group by rollup (cint, cstring1) diff --git a/ql/src/test/results/clientpositive/llap/cte_3.q.out b/ql/src/test/results/clientpositive/llap/cte_3.q.out index 27f0ab1..b6cda54 100644 --- a/ql/src/test/results/clientpositive/llap/cte_3.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_3.q.out @@ -54,13 +54,11 @@ POSTHOOK: Output: 
default@q1 5 5 5 -PREHOOK: query: -- in subquery -explain +PREHOOK: query: explain with q1 as ( select key from src where key = '5') select * from (select key from q1) a PREHOOK: type: QUERY -POSTHOOK: query: -- in subquery -explain +POSTHOOK: query: explain with q1 as ( select key from src where key = '5') select * from (select key from q1) a POSTHOOK: type: QUERY @@ -108,14 +106,12 @@ POSTHOOK: Output: default@q1 5 5 5 -PREHOOK: query: -- chaining -explain +PREHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') select * from (select key from q1) a PREHOOK: type: QUERY -POSTHOOK: query: -- chaining -explain +POSTHOOK: query: explain with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') select * from (select key from q1) a diff --git a/ql/src/test/results/clientpositive/llap/current_date_timestamp.q.out b/ql/src/test/results/clientpositive/llap/current_date_timestamp.q.out index 328f75e..98ede4e 100644 --- a/ql/src/test/results/clientpositive/llap/current_date_timestamp.q.out +++ b/ql/src/test/results/clientpositive/llap/current_date_timestamp.q.out @@ -11,14 +11,12 @@ true true true true true true true true -PREHOOK: query: --ensure that timestamp is same for all the rows while using current_timestamp() query should return single row -select count(*) from (select current_timestamp() from alltypesorc union select current_timestamp() from src limit 5 ) subq +PREHOOK: query: select count(*) from (select current_timestamp() from alltypesorc union select current_timestamp() from src limit 5 ) subq PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: --ensure that timestamp is same for all the rows while using current_timestamp() query should return single row -select count(*) from (select current_timestamp() from alltypesorc union select current_timestamp() from src limit 5 ) subq 
+POSTHOOK: query: select count(*) from (select current_timestamp() from alltypesorc union select current_timestamp() from src limit 5 ) subq POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc POSTHOOK: Input: default@src @@ -41,11 +39,9 @@ POSTHOOK: Input: default@alltypesorc POSTHOOK: Input: default@src #### A masked pattern was here #### 1 -PREHOOK: query: --current_timestamp() should appear as expression -explain extended select current_timestamp() from alltypesorc +PREHOOK: query: explain extended select current_timestamp() from alltypesorc PREHOOK: type: QUERY -POSTHOOK: query: --current_timestamp() should appear as expression -explain extended select current_timestamp() from alltypesorc +POSTHOOK: query: explain extended select current_timestamp() from alltypesorc POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -63,8 +59,7 @@ STAGE PLANS: outputColumnNames: _col0 ListSink -PREHOOK: query: --current_timestamp() + insert -create temporary table tmp_runtimeconstant( +PREHOOK: query: create temporary table tmp_runtimeconstant( ts1 timestamp, ts2 timestamp, dt date, @@ -75,8 +70,7 @@ create temporary table tmp_runtimeconstant( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_runtimeconstant -POSTHOOK: query: --current_timestamp() + insert -create temporary table tmp_runtimeconstant( +POSTHOOK: query: create temporary table tmp_runtimeconstant( ts1 timestamp, ts2 timestamp, dt date, @@ -136,13 +130,11 @@ true true true true true true true true true true true true true true true true -PREHOOK: query: --current_date() + insert -drop table if exists tmp_runtimeconstant +PREHOOK: query: drop table if exists tmp_runtimeconstant PREHOOK: type: DROPTABLE PREHOOK: Input: default@tmp_runtimeconstant PREHOOK: Output: default@tmp_runtimeconstant -POSTHOOK: query: --current_date() + insert -drop table if exists tmp_runtimeconstant +POSTHOOK: query: drop table if exists tmp_runtimeconstant POSTHOOK: type: 
DROPTABLE POSTHOOK: Input: default@tmp_runtimeconstant POSTHOOK: Output: default@tmp_runtimeconstant @@ -179,13 +171,11 @@ true true true true -PREHOOK: query: --current_timestamp() + current_date() + where -drop table if exists tmp_runtimeconstant +PREHOOK: query: drop table if exists tmp_runtimeconstant PREHOOK: type: DROPTABLE PREHOOK: Input: default@tmp_runtimeconstant PREHOOK: Output: default@tmp_runtimeconstant -POSTHOOK: query: --current_timestamp() + current_date() + where -drop table if exists tmp_runtimeconstant +POSTHOOK: query: drop table if exists tmp_runtimeconstant POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@tmp_runtimeconstant POSTHOOK: Output: default@tmp_runtimeconstant @@ -222,8 +212,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tmp_runtimeconstant #### A masked pattern was here #### 0 -PREHOOK: query: --current_timestamp() as argument for unix_timestamp(), hour(), minute(), second() -select unix_timestamp(current_timestamp()), +PREHOOK: query: select unix_timestamp(current_timestamp()), hour(current_timestamp()), minute(current_timestamp()), second(current_timestamp()) @@ -231,8 +220,7 @@ select unix_timestamp(current_timestamp()), PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: --current_timestamp() as argument for unix_timestamp(), hour(), minute(), second() -select unix_timestamp(current_timestamp()), +POSTHOOK: query: select unix_timestamp(current_timestamp()), hour(current_timestamp()), minute(current_timestamp()), second(current_timestamp()) @@ -245,8 +233,7 @@ POSTHOOK: Input: default@alltypesorc 1325408523 1 2 3 1325408523 1 2 3 1325408523 1 2 3 -PREHOOK: query: --current_timestamp() as argument for various date udfs -select to_date(current_timestamp()), +PREHOOK: query: select to_date(current_timestamp()), year(current_timestamp()), month(current_timestamp()), day(current_timestamp()), @@ -260,8 +247,7 @@ select to_date(current_timestamp()), PREHOOK: type: QUERY 
PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: --current_timestamp() as argument for various date udfs -select to_date(current_timestamp()), +POSTHOOK: query: select to_date(current_timestamp()), year(current_timestamp()), month(current_timestamp()), day(current_timestamp()), @@ -280,8 +266,7 @@ POSTHOOK: Input: default@alltypesorc 2012-01-01 2012 1 1 52 0 2012-02-01 2011-12-01 2012-01-31 2012-01-06 2012-01-01 2012 1 1 52 0 2012-02-01 2011-12-01 2012-01-31 2012-01-06 2012-01-01 2012 1 1 52 0 2012-02-01 2011-12-01 2012-01-31 2012-01-06 -PREHOOK: query: --current_date() as argument for various date udfs -select to_date(current_date()), +PREHOOK: query: select to_date(current_date()), year(current_date()), month(current_date()), day(current_date()), @@ -295,8 +280,7 @@ select to_date(current_date()), PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: --current_date() as argument for various date udfs -select to_date(current_date()), +POSTHOOK: query: select to_date(current_date()), year(current_date()), month(current_date()), day(current_date()), diff --git a/ql/src/test/results/clientpositive/llap/custom_input_output_format.q.out b/ql/src/test/results/clientpositive/llap/custom_input_output_format.q.out index 662ed1a..edc972f 100644 --- a/ql/src/test/results/clientpositive/llap/custom_input_output_format.q.out +++ b/ql/src/test/results/clientpositive/llap/custom_input_output_format.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src1_rot13_iof(key STRING, value STRING) +PREHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src1_rot13_iof -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE 
src1_rot13_iof(key STRING, value STRING) +POSTHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat' POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/llap/database.q.out b/ql/src/test/results/clientpositive/llap/database.q.out index 8c2653c..32dcd58 100644 --- a/ql/src/test/results/clientpositive/llap/database.q.out +++ b/ql/src/test/results/clientpositive/llap/database.q.out @@ -1,18 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SHOW DATABASES +PREHOOK: query: SHOW DATABASES PREHOOK: type: SHOWDATABASES -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SHOW DATABASES +POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- CREATE with comment -CREATE DATABASE test_db COMMENT 'Hive test database' +PREHOOK: query: CREATE DATABASE test_db COMMENT 'Hive test database' PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -POSTHOOK: query: -- CREATE with comment -CREATE DATABASE test_db COMMENT 'Hive test database' +POSTHOOK: query: CREATE DATABASE test_db COMMENT 'Hive test database' POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: SHOW DATABASES @@ -21,12 +15,10 @@ POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default test_db -PREHOOK: query: -- CREATE INE already exists -CREATE DATABASE IF NOT EXISTS test_db +PREHOOK: query: CREATE DATABASE IF NOT EXISTS test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -POSTHOOK: query: -- CREATE INE already exists -CREATE DATABASE IF NOT EXISTS test_db +POSTHOOK: query: CREATE DATABASE IF NOT EXISTS test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: SHOW DATABASES @@ -35,21 +27,17 @@ POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default test_db -PREHOOK: query: -- SHOW DATABASES synonym 
-SHOW SCHEMAS +PREHOOK: query: SHOW SCHEMAS PREHOOK: type: SHOWDATABASES -POSTHOOK: query: -- SHOW DATABASES synonym -SHOW SCHEMAS +POSTHOOK: query: SHOW SCHEMAS POSTHOOK: type: SHOWDATABASES default test_db -PREHOOK: query: -- DROP -DROP DATABASE test_db +PREHOOK: query: DROP DATABASE test_db PREHOOK: type: DROPDATABASE PREHOOK: Input: database:test_db PREHOOK: Output: database:test_db -POSTHOOK: query: -- DROP -DROP DATABASE test_db +POSTHOOK: query: DROP DATABASE test_db POSTHOOK: type: DROPDATABASE POSTHOOK: Input: database:test_db POSTHOOK: Output: database:test_db @@ -58,12 +46,10 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- CREATE INE doesn't exist -CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' +PREHOOK: query: CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -POSTHOOK: query: -- CREATE INE doesn't exist -CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' +POSTHOOK: query: CREATE DATABASE IF NOT EXISTS test_db COMMENT 'Hive test database' POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: SHOW DATABASES @@ -72,13 +58,11 @@ POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default test_db -PREHOOK: query: -- DROP IE exists -DROP DATABASE IF EXISTS test_db +PREHOOK: query: DROP DATABASE IF EXISTS test_db PREHOOK: type: DROPDATABASE PREHOOK: Input: database:test_db PREHOOK: Output: database:test_db -POSTHOOK: query: -- DROP IE exists -DROP DATABASE IF EXISTS test_db +POSTHOOK: query: DROP DATABASE IF EXISTS test_db POSTHOOK: type: DROPDATABASE POSTHOOK: Input: database:test_db POSTHOOK: Output: database:test_db @@ -87,18 +71,14 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- DROP IE doesn't exist -DROP DATABASE IF EXISTS test_db +PREHOOK: query: DROP 
DATABASE IF EXISTS test_db PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- DROP IE doesn't exist -DROP DATABASE IF EXISTS test_db +POSTHOOK: query: DROP DATABASE IF EXISTS test_db POSTHOOK: type: DROPDATABASE -PREHOOK: query: -- SHOW -CREATE DATABASE test_db +PREHOOK: query: CREATE DATABASE test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -POSTHOOK: query: -- SHOW -CREATE DATABASE test_db +POSTHOOK: query: CREATE DATABASE test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: SHOW DATABASES @@ -107,18 +87,14 @@ POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default test_db -PREHOOK: query: -- SHOW pattern -SHOW DATABASES LIKE 'test*' +PREHOOK: query: SHOW DATABASES LIKE 'test*' PREHOOK: type: SHOWDATABASES -POSTHOOK: query: -- SHOW pattern -SHOW DATABASES LIKE 'test*' +POSTHOOK: query: SHOW DATABASES LIKE 'test*' POSTHOOK: type: SHOWDATABASES test_db -PREHOOK: query: -- SHOW pattern -SHOW DATABASES LIKE '*ef*' +PREHOOK: query: SHOW DATABASES LIKE '*ef*' PREHOOK: type: SHOWDATABASES -POSTHOOK: query: -- SHOW pattern -SHOW DATABASES LIKE '*ef*' +POSTHOOK: query: SHOW DATABASES LIKE '*ef*' POSTHOOK: type: SHOWDATABASES default PREHOOK: query: USE test_db @@ -133,13 +109,11 @@ POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default test_db -PREHOOK: query: -- CREATE table in non-default DB -CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:test_db PREHOOK: Output: test_db@test_table -POSTHOOK: query: -- CREATE table in non-default DB -CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE test_table (col1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test_db POSTHOOK: Output: test_db@test_table @@ -150,33 +124,27 @@ POSTHOOK: query: SHOW TABLES POSTHOOK: type: SHOWTABLES POSTHOOK: 
Input: database:test_db test_table -PREHOOK: query: -- DESCRIBE table in non-default DB -DESCRIBE test_table +PREHOOK: query: DESCRIBE test_table PREHOOK: type: DESCTABLE PREHOOK: Input: test_db@test_table -POSTHOOK: query: -- DESCRIBE table in non-default DB -DESCRIBE test_table +POSTHOOK: query: DESCRIBE test_table POSTHOOK: type: DESCTABLE POSTHOOK: Input: test_db@test_table col1 string -PREHOOK: query: -- DESCRIBE EXTENDED in non-default DB -DESCRIBE EXTENDED test_table +PREHOOK: query: DESCRIBE EXTENDED test_table PREHOOK: type: DESCTABLE PREHOOK: Input: test_db@test_table -POSTHOOK: query: -- DESCRIBE EXTENDED in non-default DB -DESCRIBE EXTENDED test_table +POSTHOOK: query: DESCRIBE EXTENDED test_table POSTHOOK: type: DESCTABLE POSTHOOK: Input: test_db@test_table col1 string #### A masked pattern was here #### -PREHOOK: query: -- CREATE LIKE in non-default DB -CREATE TABLE test_table_like LIKE test_table +PREHOOK: query: CREATE TABLE test_table_like LIKE test_table PREHOOK: type: CREATETABLE PREHOOK: Output: database:test_db PREHOOK: Output: test_db@test_table_like -POSTHOOK: query: -- CREATE LIKE in non-default DB -CREATE TABLE test_table_like LIKE test_table +POSTHOOK: query: CREATE TABLE test_table_like LIKE test_table POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test_db POSTHOOK: Output: test_db@test_table_like @@ -197,14 +165,12 @@ POSTHOOK: Input: test_db@test_table_like col1 string #### A masked pattern was here #### -PREHOOK: query: -- LOAD and SELECT -LOAD DATA LOCAL INPATH '../../data/files/test.dat' +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/test.dat' OVERWRITE INTO TABLE test_table PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: test_db@test_table -POSTHOOK: query: -- LOAD and SELECT -LOAD DATA LOCAL INPATH '../../data/files/test.dat' +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/test.dat' OVERWRITE INTO TABLE test_table POSTHOOK: type: LOAD #### A masked pattern was here #### @@ 
-223,13 +189,11 @@ POSTHOOK: Input: test_db@test_table 4 5 6 -PREHOOK: query: -- DROP and CREATE w/o LOAD -DROP TABLE test_table +PREHOOK: query: DROP TABLE test_table PREHOOK: type: DROPTABLE PREHOOK: Input: test_db@test_table PREHOOK: Output: test_db@test_table -POSTHOOK: query: -- DROP and CREATE w/o LOAD -DROP TABLE test_table +POSTHOOK: query: DROP TABLE test_table POSTHOOK: type: DROPTABLE POSTHOOK: Input: test_db@test_table POSTHOOK: Output: test_db@test_table @@ -264,12 +228,10 @@ POSTHOOK: query: SELECT * FROM test_table POSTHOOK: type: QUERY POSTHOOK: Input: test_db@test_table #### A masked pattern was here #### -PREHOOK: query: -- CREATE table that already exists in DEFAULT -USE test_db +PREHOOK: query: USE test_db PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:test_db -POSTHOOK: query: -- CREATE table that already exists in DEFAULT -USE test_db +POSTHOOK: query: USE test_db POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test_db PREHOOK: query: CREATE TABLE src (col1 STRING) STORED AS TEXTFILE @@ -321,12 +283,10 @@ POSTHOOK: Input: default@src 484 val_484 86 val_86 98 val_98 -PREHOOK: query: -- DROP DATABASE -USE test_db +PREHOOK: query: USE test_db PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:test_db -POSTHOOK: query: -- DROP DATABASE -USE test_db +POSTHOOK: query: USE test_db POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test_db PREHOOK: query: DROP TABLE src @@ -378,12 +338,10 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- DROP EMPTY DATABASE CASCADE -CREATE DATABASE to_drop_db1 +PREHOOK: query: CREATE DATABASE to_drop_db1 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:to_drop_db1 -POSTHOOK: query: -- DROP EMPTY DATABASE CASCADE -CREATE DATABASE to_drop_db1 +POSTHOOK: query: CREATE DATABASE to_drop_db1 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:to_drop_db1 PREHOOK: query: SHOW DATABASES @@ -411,12 +369,10 @@ 
PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- DROP NON-EMPTY DATABASE CASCADE -CREATE DATABASE to_drop_db2 +PREHOOK: query: CREATE DATABASE to_drop_db2 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:to_drop_db2 -POSTHOOK: query: -- DROP NON-EMPTY DATABASE CASCADE -CREATE DATABASE to_drop_db2 +POSTHOOK: query: CREATE DATABASE to_drop_db2 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:to_drop_db2 PREHOOK: query: SHOW DATABASES @@ -479,12 +435,10 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- DROP NON-EMPTY DATABASE CASCADE IF EXISTS -CREATE DATABASE to_drop_db3 +PREHOOK: query: CREATE DATABASE to_drop_db3 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:to_drop_db3 -POSTHOOK: query: -- DROP NON-EMPTY DATABASE CASCADE IF EXISTS -CREATE DATABASE to_drop_db3 +POSTHOOK: query: CREATE DATABASE to_drop_db3 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:to_drop_db3 PREHOOK: query: SHOW DATABASES @@ -528,29 +482,23 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- DROP NON-EXISTING DATABASE CASCADE IF EXISTS -DROP DATABASE IF EXISTS non_exists_db3 CASCADE +PREHOOK: query: DROP DATABASE IF EXISTS non_exists_db3 CASCADE PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- DROP NON-EXISTING DATABASE CASCADE IF EXISTS -DROP DATABASE IF EXISTS non_exists_db3 CASCADE +POSTHOOK: query: DROP DATABASE IF EXISTS non_exists_db3 CASCADE POSTHOOK: type: DROPDATABASE PREHOOK: query: SHOW DATABASES PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- DROP NON-EXISTING DATABASE RESTRICT IF EXISTS -DROP DATABASE IF EXISTS non_exists_db3 RESTRICT +PREHOOK: query: DROP DATABASE IF EXISTS non_exists_db3 RESTRICT PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- DROP NON-EXISTING 
DATABASE RESTRICT IF EXISTS -DROP DATABASE IF EXISTS non_exists_db3 RESTRICT +POSTHOOK: query: DROP DATABASE IF EXISTS non_exists_db3 RESTRICT POSTHOOK: type: DROPDATABASE -PREHOOK: query: -- DROP EMPTY DATABASE RESTRICT -CREATE DATABASE to_drop_db4 +PREHOOK: query: CREATE DATABASE to_drop_db4 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:to_drop_db4 -POSTHOOK: query: -- DROP EMPTY DATABASE RESTRICT -CREATE DATABASE to_drop_db4 +POSTHOOK: query: CREATE DATABASE to_drop_db4 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:to_drop_db4 PREHOOK: query: SHOW DATABASES @@ -572,18 +520,10 @@ PREHOOK: type: SHOWDATABASES POSTHOOK: query: SHOW DATABASES POSTHOOK: type: SHOWDATABASES default -PREHOOK: query: -- --- Canonical Name Tests --- - -CREATE DATABASE db1 +PREHOOK: query: CREATE DATABASE db1 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:db1 -POSTHOOK: query: -- --- Canonical Name Tests --- - -CREATE DATABASE db1 +POSTHOOK: query: CREATE DATABASE db1 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:db1 PREHOOK: query: CREATE DATABASE db2 @@ -592,37 +532,31 @@ PREHOOK: Output: database:db2 POSTHOOK: query: CREATE DATABASE db2 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:db2 -PREHOOK: query: -- CREATE foreign table -CREATE TABLE db1.src(key STRING, value STRING) +PREHOOK: query: CREATE TABLE db1.src(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 PREHOOK: Output: db1@src -POSTHOOK: query: -- CREATE foreign table -CREATE TABLE db1.src(key STRING, value STRING) +POSTHOOK: query: CREATE TABLE db1.src(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@src -PREHOOK: query: -- LOAD into foreign table -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE db1.src PREHOOK: type: LOAD #### A masked pattern was here #### 
PREHOOK: Output: db1@src -POSTHOOK: query: -- LOAD into foreign table -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE db1.src POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: db1@src -PREHOOK: query: -- SELECT from foreign table -SELECT * FROM db1.src +PREHOOK: query: SELECT * FROM db1.src PREHOOK: type: QUERY PREHOOK: Input: db1@src #### A masked pattern was here #### -POSTHOOK: query: -- SELECT from foreign table -SELECT * FROM db1.src +POSTHOOK: query: SELECT * FROM db1.src POSTHOOK: type: QUERY POSTHOOK: Input: db1@src #### A masked pattern was here #### @@ -1126,44 +1060,38 @@ POSTHOOK: Input: db1@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- CREATE Partitioned foreign table -CREATE TABLE db1.srcpart(key STRING, value STRING) +PREHOOK: query: CREATE TABLE db1.srcpart(key STRING, value STRING) PARTITIONED BY (ds STRING, hr STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:db1 PREHOOK: Output: db1@srcpart -POSTHOOK: query: -- CREATE Partitioned foreign table -CREATE TABLE db1.srcpart(key STRING, value STRING) +POSTHOOK: query: CREATE TABLE db1.srcpart(key STRING, value STRING) PARTITIONED BY (ds STRING, hr STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@srcpart -PREHOOK: query: -- LOAD data into Partitioned foreign table -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE db1.srcpart PARTITION (ds='2008-04-08', hr='11') PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: db1@srcpart -POSTHOOK: query: -- LOAD data into Partitioned foreign table -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' OVERWRITE INTO TABLE db1.srcpart PARTITION (ds='2008-04-08', hr='11') POSTHOOK: type: 
LOAD #### A masked pattern was here #### POSTHOOK: Output: db1@srcpart POSTHOOK: Output: db1@srcpart@ds=2008-04-08/hr=11 -PREHOOK: query: -- SELECT from Partitioned foreign table -SELECT key, value FROM db1.srcpart +PREHOOK: query: SELECT key, value FROM db1.srcpart WHERE key < 100 AND ds='2008-04-08' AND hr='11' PREHOOK: type: QUERY PREHOOK: Input: db1@srcpart PREHOOK: Input: db1@srcpart@ds=2008-04-08/hr=11 #### A masked pattern was here #### -POSTHOOK: query: -- SELECT from Partitioned foreign table -SELECT key, value FROM db1.srcpart +POSTHOOK: query: SELECT key, value FROM db1.srcpart WHERE key < 100 AND ds='2008-04-08' AND hr='11' POSTHOOK: type: QUERY POSTHOOK: Input: db1@srcpart @@ -1253,12 +1181,10 @@ POSTHOOK: Input: db1@srcpart@ds=2008-04-08/hr=11 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- SELECT JOINed product of two foreign tables -USE db2 +PREHOOK: query: USE db2 PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:db2 -POSTHOOK: query: -- SELECT JOINed product of two foreign tables -USE db2 +POSTHOOK: query: USE db2 POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:db2 PREHOOK: query: SELECT a.* FROM db1.src a JOIN default.src1 b @@ -1310,38 +1236,33 @@ POSTHOOK: Input: default@src1 66 val_66 98 val_98 98 val_98 -PREHOOK: query: -- CREATE TABLE AS SELECT from foreign table -CREATE TABLE conflict_name AS +PREHOOK: query: CREATE TABLE conflict_name AS SELECT value FROM default.src WHERE key = 66 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:db2 PREHOOK: Output: db2@conflict_name -POSTHOOK: query: -- CREATE TABLE AS SELECT from foreign table -CREATE TABLE conflict_name AS +POSTHOOK: query: CREATE TABLE conflict_name AS SELECT value FROM default.src WHERE key = 66 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:db2 POSTHOOK: Output: db2@conflict_name POSTHOOK: Lineage: conflict_name.value SIMPLE [(src)src.FieldSchema(name:value, type:string, 
comment:default), ] -PREHOOK: query: -- CREATE foreign table -CREATE TABLE db1.conflict_name AS +PREHOOK: query: CREATE TABLE db1.conflict_name AS SELECT value FROM db1.src WHERE key = 8 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: db1@src PREHOOK: Output: database:db1 PREHOOK: Output: db1@conflict_name -POSTHOOK: query: -- CREATE foreign table -CREATE TABLE db1.conflict_name AS +POSTHOOK: query: CREATE TABLE db1.conflict_name AS SELECT value FROM db1.src WHERE key = 8 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: db1@src POSTHOOK: Output: database:db1 POSTHOOK: Output: db1@conflict_name POSTHOOK: Lineage: conflict_name.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- query tables with the same names in different DBs -SELECT * FROM ( +PREHOOK: query: SELECT * FROM ( SELECT value FROM db1.conflict_name UNION ALL SELECT value FROM conflict_name @@ -1350,8 +1271,7 @@ PREHOOK: type: QUERY PREHOOK: Input: db1@conflict_name PREHOOK: Input: db2@conflict_name #### A masked pattern was here #### -POSTHOOK: query: -- query tables with the same names in different DBs -SELECT * FROM ( +POSTHOOK: query: SELECT * FROM ( SELECT value FROM db1.conflict_name UNION ALL SELECT value FROM conflict_name @@ -1388,14 +1308,12 @@ POSTHOOK: Input: db2@conflict_name #### A masked pattern was here #### val_66 val_8 -PREHOOK: query: -- TABLESAMPLES -CREATE TABLE bucketized_src (key INT, value STRING) +PREHOOK: query: CREATE TABLE bucketized_src (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucketized_src -POSTHOOK: query: -- TABLESAMPLES -CREATE TABLE bucketized_src (key INT, value STRING) +POSTHOOK: query: CREATE TABLE bucketized_src (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -1421,13 +1339,11 @@ POSTHOOK: type: QUERY 
POSTHOOK: Input: default@bucketized_src #### A masked pattern was here #### 66 -PREHOOK: query: -- CREATE TABLE LIKE -CREATE TABLE db2.src1 LIKE default.src +PREHOOK: query: CREATE TABLE db2.src1 LIKE default.src PREHOOK: type: CREATETABLE PREHOOK: Output: database:db2 PREHOOK: Output: db2@src1 -POSTHOOK: query: -- CREATE TABLE LIKE -CREATE TABLE db2.src1 LIKE default.src +POSTHOOK: query: CREATE TABLE db2.src1 LIKE default.src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:db2 POSTHOOK: Output: db2@src1 @@ -1447,13 +1363,11 @@ key string default value string default #### A masked pattern was here #### -PREHOOK: query: -- character escaping -SELECT key FROM `default`.src ORDER BY key LIMIT 1 +PREHOOK: query: SELECT key FROM `default`.src ORDER BY key LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- character escaping -SELECT key FROM `default`.src ORDER BY key LIMIT 1 +POSTHOOK: query: SELECT key FROM `default`.src ORDER BY key LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/drop_partition_with_stats.q.out b/ql/src/test/results/clientpositive/llap/drop_partition_with_stats.q.out index e27e557..c6ab40d 100644 --- a/ql/src/test/results/clientpositive/llap/drop_partition_with_stats.q.out +++ b/ql/src/test/results/clientpositive/llap/drop_partition_with_stats.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- This test verifies that a table partition could be dropped with columns stats computed --- The column stats for a partitioned table will go to PART_COL_STATS -CREATE DATABASE IF NOT EXISTS partstatsdb1 +PREHOOK: query: CREATE DATABASE IF NOT EXISTS partstatsdb1 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:partstatsdb1 -POSTHOOK: query: -- This test verifies that a table partition could be dropped with columns stats computed --- The column stats for a partitioned table will go to 
PART_COL_STATS -CREATE DATABASE IF NOT EXISTS partstatsdb1 +POSTHOOK: query: CREATE DATABASE IF NOT EXISTS partstatsdb1 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:partstatsdb1 PREHOOK: query: USE partstatsdb1 diff --git a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out index d3acbcd..6b8a249 100644 --- a/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out +++ b/ql/src/test/results/clientpositive/llap/dynamic_partition_pruning.q.out @@ -198,11 +198,9 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_double_hour POSTHOOK: Lineage: srcpart_double_hour.hour SIMPLE [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] POSTHOOK: Lineage: srcpart_double_hour.hr EXPRESSION [(srcpart)srcpart.FieldSchema(name:hr, type:string, comment:null), ] -PREHOOK: query: -- single column, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- single column, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -456,11 +454,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 1000 -PREHOOK: query: -- single column, single key, udf with typechange -EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where 
srcpart_date.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- single column, single key, udf with typechange -EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -701,12 +697,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Input: default@srcpart_date #### A masked pattern was here #### 1000 -PREHOOK: query: -- multiple sources, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 PREHOOK: type: QUERY -POSTHOOK: query: -- multiple sources, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1059,11 +1053,9 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 #### A masked pattern was here #### 500 -PREHOOK: query: -- multiple columns single source -EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds 
and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 PREHOOK: type: QUERY -POSTHOOK: query: -- multiple columns single source -EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1329,11 +1321,9 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 #### A masked pattern was here #### 500 -PREHOOK: query: -- empty set -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' PREHOOK: type: QUERY -POSTHOOK: query: -- empty set -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1583,11 +1573,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart #### A masked pattern was here #### 0 -PREHOOK: query: -- expressions -EXPLAIN select count(*) from srcpart join 
srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 PREHOOK: type: QUERY -POSTHOOK: query: -- expressions -EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2227,11 +2215,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 #### A masked pattern was here #### 1000 Warning: Shuffle Join MERGEJOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: -- parent is reduce tasks -EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- parent is reduce tasks -EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2362,11 +2348,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 1000 Warning: Shuffle Join MERGEJOIN[16][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross 
product -PREHOOK: query: -- non-equi join -EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +PREHOOK: query: EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) PREHOOK: type: QUERY -POSTHOOK: query: -- non-equi join -EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) +POSTHOOK: query: EXPLAIN select count(*) from srcpart, srcpart_date_hour where (srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11) and (srcpart.ds = srcpart_date_hour.ds or srcpart.hr = srcpart_date_hour.hr) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2482,11 +2466,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### 1500 -PREHOOK: query: -- old style join syntax -EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +PREHOOK: query: EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr PREHOOK: type: QUERY -POSTHOOK: query: -- old style join syntax -EXPLAIN select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr +POSTHOOK: query: EXPLAIN 
select count(*) from srcpart, srcpart_date_hour where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 and srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2627,11 +2609,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Input: default@srcpart_date_hour #### A masked pattern was here #### 500 -PREHOOK: query: -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2848,11 +2828,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -2959,12 +2937,10 @@ STAGE 
PLANS: Processor Tree: ListSink -PREHOOK: query: -- with static pruning -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 PREHOOK: type: QUERY -POSTHOOK: query: -- with static pruning -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -3282,11 +3258,9 @@ POSTHOOK: Input: default@srcpart_date POSTHOOK: Input: default@srcpart_hour #### A masked pattern was here #### 0 -PREHOOK: query: -- union + subquery -EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: query: EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) PREHOOK: type: QUERY -POSTHOOK: query: -- union + subquery -EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: query: EXPLAIN select count(*) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -3936,11 +3910,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 2008-04-08 2008-04-09 2008-04-09 -PREHOOK: query: -- single column, 
single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- single column, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4074,11 +4046,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 1000 -PREHOOK: query: -- single column, single key, udf with typechange -EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- single column, single key, udf with typechange -EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (day(srcpart.ds) = day(srcpart_date.ds)) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4199,12 +4169,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Input: default@srcpart_date #### A masked pattern was here #### 1000 -PREHOOK: query: -- multiple sources, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join 
srcpart_hour on (srcpart.hr = srcpart_hour.hr) +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 PREHOOK: type: QUERY -POSTHOOK: query: -- multiple sources, single key -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -4385,11 +4353,9 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 #### A masked pattern was here #### 500 -PREHOOK: query: -- multiple columns single source -EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 PREHOOK: type: QUERY -POSTHOOK: query: -- multiple columns single source -EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date_hour on (srcpart.ds = srcpart_date_hour.ds and srcpart.hr = srcpart_date_hour.hr) where srcpart_date_hour.`date` = '2008-04-08' and srcpart_date_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ 
-4535,11 +4501,9 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 #### A masked pattern was here #### 500 -PREHOOK: query: -- empty set -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' PREHOOK: type: QUERY -POSTHOOK: query: -- empty set -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = 'I DONT EXIST' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4660,11 +4624,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 POSTHOOK: Input: default@srcpart_date #### A masked pattern was here #### 0 -PREHOOK: query: -- expressions -EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 PREHOOK: type: QUERY -POSTHOOK: query: -- expressions -EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_double_hour on (srcpart.hr = cast(srcpart_double_hour.hr/2 as int)) where srcpart_double_hour.hour = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -4922,13 +4884,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 #### A masked pattern was here #### 1000 Warning: Map Join MAPJOIN[22][bigTable=?] 
in task 'Reducer 3' is a cross product -PREHOOK: query: -- parent is reduce tasks - -EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- parent is reduce tasks - -EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart join (select ds as ds, ds as `date` from srcpart group by ds) s on (srcpart.ds = s.ds) where s.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -5053,11 +5011,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 1000 -PREHOOK: query: -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +PREHOOK: query: EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- left join -EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart left join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -5247,11 +5203,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' 
+PREHOOK: query: EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- full outer -EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' +POSTHOOK: query: EXPLAIN select count(*) from srcpart full outer join srcpart_date on (srcpart.ds = srcpart_date.ds) where srcpart_date.`date` = '2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -5337,12 +5291,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- with static pruning -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +PREHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 PREHOOK: type: QUERY -POSTHOOK: query: -- with static pruning -EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) +POSTHOOK: query: EXPLAIN select count(*) from srcpart join srcpart_date on (srcpart.ds = srcpart_date.ds) join srcpart_hour on (srcpart.hr = srcpart_hour.hr) where srcpart_date.`date` = '2008-04-08' and srcpart_hour.hour = 11 and srcpart.hr = 11 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -5633,11 +5585,9 @@ POSTHOOK: Input: default@srcpart_date POSTHOOK: Input: default@srcpart_hour #### A masked pattern was here #### 0 -PREHOOK: query: -- union + subquery -EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +PREHOOK: query: EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart 
union all select min(srcpart.ds) from srcpart) PREHOOK: type: QUERY -POSTHOOK: query: -- union + subquery -EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) +POSTHOOK: query: EXPLAIN select distinct(ds) from srcpart where srcpart.ds in (select max(srcpart.ds) from srcpart union all select min(srcpart.ds) from srcpart) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -5816,13 +5766,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 2008-04-08 2008-04-09 -PREHOOK: query: -- different file format -create table srcpart_orc (key int, value string) partitioned by (ds string, hr int) stored as orc +PREHOOK: query: create table srcpart_orc (key int, value string) partitioned by (ds string, hr int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_orc -POSTHOOK: query: -- different file format -create table srcpart_orc (key int, value string) partitioned by (ds string, hr int) stored as orc +POSTHOOK: query: create table srcpart_orc (key int, value string) partitioned by (ds string, hr int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_orc diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out index 2248a35..a7e8518 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_opt_vectorization.q.out @@ -151,11 +151,9 @@ POSTHOOK: query: create table over1k_part_buck_sort_orc( POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@over1k_part_buck_sort_orc -PREHOOK: query: -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition 
optimization -explain insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si +PREHOOK: query: explain insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si PREHOOK: type: QUERY -POSTHOOK: query: -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization -explain insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si +POSTHOOK: query: explain insert overwrite table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -530,11 +528,9 @@ POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITIO POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:f, type:float, comment:null), ] POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:i, type:int, comment:null), ] POSTHOOK: Lineage: over1k_part_buck_sort_orc PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k_orc)over1k_orc.FieldSchema(name:si, type:smallint, comment:null), ] -PREHOOK: query: -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization -explain insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si +PREHOOK: query: explain insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si PREHOOK: type: QUERY -POSTHOOK: query: -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization -explain insert into table over1k_part_orc partition(ds="foo", t) select 
si,i,b,f,t from over1k_orc where t is null or t=27 order by si +POSTHOOK: query: explain insert into table over1k_part_orc partition(ds="foo", t) select si,i,b,f,t from over1k_orc where t is null or t=27 order by si POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1293,8 +1289,7 @@ POSTHOOK: Input: default@over1k_part_buck_sort_orc@t=27 POSTHOOK: Input: default@over1k_part_buck_sort_orc@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 38 -PREHOOK: query: -- tests for HIVE-6883 -create table over1k_part2_orc( +PREHOOK: query: create table over1k_part2_orc( si smallint, i int, b bigint, @@ -1303,8 +1298,7 @@ create table over1k_part2_orc( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@over1k_part2_orc -POSTHOOK: query: -- tests for HIVE-6883 -create table over1k_part2_orc( +POSTHOOK: query: create table over1k_part2_orc( si smallint, i int, b bigint, @@ -1630,11 +1624,9 @@ STAGE PLANS: Stage: Stage-3 Stats-Aggr Operator -PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator -explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +PREHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t PREHOOK: type: QUERY -POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator -explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t +POSTHOOK: query: explain insert overwrite table over1k_part2_orc partition(ds="foo",t) select si,i,b,f,t from over1k_orc where t is null or t=27 group by si,i,b,f,t POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1811,15 +1803,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: 
serialization.format 1 -PREHOOK: query: -- SORT_BEFORE_DIFF -select * from over1k_part2_orc +PREHOOK: query: select * from over1k_part2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2_orc PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: -- SORT_BEFORE_DIFF -select * from over1k_part2_orc +POSTHOOK: query: select * from over1k_part2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2_orc POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 @@ -1958,15 +1948,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- SORT_BEFORE_DIFF -select * from over1k_part2_orc +PREHOOK: query: select * from over1k_part2_orc PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2_orc PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 PREHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: -- SORT_BEFORE_DIFF -select * from over1k_part2_orc +POSTHOOK: query: select * from over1k_part2_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2_orc POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 @@ -2004,10 +1992,7 @@ POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=27 POSTHOOK: Input: default@over1k_part2_orc@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 19 -PREHOOK: query: -- hadoop-1 does not honor number of reducers in local mode. There is always only 1 reducer irrespective of the number of buckets. --- Hence all records go to one bucket and all other buckets will be empty. Similar to HIVE-6867. However, hadoop-2 honors number --- of reducers and records are spread across all reducers. To avoid this inconsistency we will make number of buckets to 1 for this test. 
-create table over1k_part_buck_sort2_orc( +PREHOOK: query: create table over1k_part_buck_sort2_orc( si smallint, i int, b bigint, @@ -2018,10 +2003,7 @@ create table over1k_part_buck_sort2_orc( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@over1k_part_buck_sort2_orc -POSTHOOK: query: -- hadoop-1 does not honor number of reducers in local mode. There is always only 1 reducer irrespective of the number of buckets. --- Hence all records go to one bucket and all other buckets will be empty. Similar to HIVE-6867. However, hadoop-2 honors number --- of reducers and records are spread across all reducers. To avoid this inconsistency we will make number of buckets to 1 for this test. -create table over1k_part_buck_sort2_orc( +POSTHOOK: query: create table over1k_part_buck_sort2_orc( si smallint, i int, b bigint, diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out index 5569011..b15655d 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization.q.out @@ -108,11 +108,9 @@ POSTHOOK: query: create table over1k_part_buck_sort( POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@over1k_part_buck_sort -PREHOOK: query: -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization -explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 +PREHOOK: query: explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 PREHOOK: type: QUERY -POSTHOOK: query: -- map-only jobs converted to map-reduce job by hive.optimize.sort.dynamic.partition optimization -explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where 
t is null or t=27 +POSTHOOK: query: explain insert overwrite table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -487,11 +485,9 @@ POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__) POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).f SIMPLE [(over1k)over1k.FieldSchema(name:f, type:float, comment:null), ] POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).i SIMPLE [(over1k)over1k.FieldSchema(name:i, type:int, comment:null), ] POSTHOOK: Lineage: over1k_part_buck_sort PARTITION(t=__HIVE_DEFAULT_PARTITION__).si SIMPLE [(over1k)over1k.FieldSchema(name:si, type:smallint, comment:null), ] -PREHOOK: query: -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization -explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 +PREHOOK: query: explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 PREHOOK: type: QUERY -POSTHOOK: query: -- map-reduce jobs modified by hive.optimize.sort.dynamic.partition optimization -explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 +POSTHOOK: query: explain insert into table over1k_part partition(ds="foo", t) select si,i,b,f,t from over1k where t is null or t=27 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1250,8 +1246,7 @@ POSTHOOK: Input: default@over1k_part_buck_sort@t=27 POSTHOOK: Input: default@over1k_part_buck_sort@t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 38 -PREHOOK: query: -- tests for HIVE-6883 -create table over1k_part2( +PREHOOK: query: create table over1k_part2( si smallint, i int, b bigint, @@ -1260,8 +1255,7 @@ create table over1k_part2( PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default PREHOOK: Output: default@over1k_part2 -POSTHOOK: query: -- tests for HIVE-6883 -create table over1k_part2( +POSTHOOK: query: create table over1k_part2( si smallint, i int, b bigint, @@ -1587,11 +1581,9 @@ STAGE PLANS: Stage: Stage-3 Stats-Aggr Operator -PREHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator -explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +PREHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t PREHOOK: type: QUERY -POSTHOOK: query: -- tests for HIVE-8162, only partition column 't' should be in last RS operator -explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t +POSTHOOK: query: explain insert overwrite table over1k_part2 partition(ds="foo",t) select si,i,b,f,t from over1k where t is null or t=27 group by si,i,b,f,t POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1768,15 +1760,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- SORT_BEFORE_DIFF -select * from over1k_part2 +PREHOOK: query: select * from over1k_part2 PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2 PREHOOK: Input: default@over1k_part2@ds=foo/t=27 PREHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: -- SORT_BEFORE_DIFF -select * from over1k_part2 +POSTHOOK: query: select * from over1k_part2 POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27 @@ -1915,15 +1905,13 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- SORT_BEFORE_DIFF -select * from over1k_part2 +PREHOOK: query: select * from 
over1k_part2 PREHOOK: type: QUERY PREHOOK: Input: default@over1k_part2 PREHOOK: Input: default@over1k_part2@ds=foo/t=27 PREHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### -POSTHOOK: query: -- SORT_BEFORE_DIFF -select * from over1k_part2 +POSTHOOK: query: select * from over1k_part2 POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k_part2 POSTHOOK: Input: default@over1k_part2@ds=foo/t=27 @@ -1961,10 +1949,7 @@ POSTHOOK: Input: default@over1k_part2@ds=foo/t=27 POSTHOOK: Input: default@over1k_part2@ds=foo/t=__HIVE_DEFAULT_PARTITION__ #### A masked pattern was here #### 19 -PREHOOK: query: -- hadoop-1 does not honor number of reducers in local mode. There is always only 1 reducer irrespective of the number of buckets. --- Hence all records go to one bucket and all other buckets will be empty. Similar to HIVE-6867. However, hadoop-2 honors number --- of reducers and records are spread across all reducers. To avoid this inconsistency we will make number of buckets to 1 for this test. -create table over1k_part_buck_sort2( +PREHOOK: query: create table over1k_part_buck_sort2( si smallint, i int, b bigint, @@ -1975,10 +1960,7 @@ create table over1k_part_buck_sort2( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@over1k_part_buck_sort2 -POSTHOOK: query: -- hadoop-1 does not honor number of reducers in local mode. There is always only 1 reducer irrespective of the number of buckets. --- Hence all records go to one bucket and all other buckets will be empty. Similar to HIVE-6867. However, hadoop-2 honors number --- of reducers and records are spread across all reducers. To avoid this inconsistency we will make number of buckets to 1 for this test. 
-create table over1k_part_buck_sort2( +POSTHOOK: query: create table over1k_part_buck_sort2( si smallint, i int, b bigint, @@ -3091,13 +3073,11 @@ POSTHOOK: Input: default@over1k_part3@s=wendy van buren/t=27/i=65680 POSTHOOK: Input: default@over1k_part3@s=xavier quirinius/t=27/i=65599 #### A masked pattern was here #### 17814641134 -PREHOOK: query: -- cross verify results with SDPO disabled -drop table over1k_part3 +PREHOOK: query: drop table over1k_part3 PREHOOK: type: DROPTABLE PREHOOK: Input: default@over1k_part3 PREHOOK: Output: default@over1k_part3 -POSTHOOK: query: -- cross verify results with SDPO disabled -drop table over1k_part3 +POSTHOOK: query: drop table over1k_part3 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@over1k_part3 POSTHOOK: Output: default@over1k_part3 diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out index ba56486..7fb8ed9 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization2.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table ss +PREHOOK: query: drop table ss PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table ss +POSTHOOK: query: drop table ss POSTHOOK: type: DROPTABLE PREHOOK: query: drop table ss_orc PREHOOK: type: DROPTABLE @@ -533,9 +529,7 @@ POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 317.87 -3775.38 2452638 4133.98 -775.72 2452638 4329.49 -4000.51 2452638 -PREHOOK: query: -- SORT DYNAMIC PARTITION DISABLED - -explain insert overwrite table ss_part partition (ss_sold_date_sk) +PREHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk @@ -546,9 +540,7 @@ select ss_net_paid_inc_tax, ss_net_profit distribute by ss_sold_date_sk PREHOOK: type: QUERY -POSTHOOK: query: -- SORT 
DYNAMIC PARTITION DISABLED - -explain insert overwrite table ss_part partition (ss_sold_date_sk) +POSTHOOK: query: explain insert overwrite table ss_part partition (ss_sold_date_sk) select ss_net_paid_inc_tax, ss_net_profit, ss_sold_date_sk @@ -1015,18 +1007,14 @@ POSTHOOK: Input: default@ss_part@ss_sold_date_sk=2452638 317.87 -3775.38 2452638 4133.98 -775.72 2452638 4329.49 -4000.51 2452638 -PREHOOK: query: -- VECTORIZATION IS ENABLED - -create table ss_orc ( +PREHOOK: query: create table ss_orc ( ss_sold_date_sk int, ss_net_paid_inc_tax float, ss_net_profit float) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ss_orc -POSTHOOK: query: -- VECTORIZATION IS ENABLED - -create table ss_orc ( +POSTHOOK: query: create table ss_orc ( ss_sold_date_sk int, ss_net_paid_inc_tax float, ss_net_profit float) stored as orc diff --git a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out index 604ec61..b7679f1 100644 --- a/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out +++ b/ql/src/test/results/clientpositive/llap/dynpart_sort_optimization_acid.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- single level partition, sorted dynamic partition disabled -drop table acid +PREHOOK: query: drop table acid PREHOOK: type: DROPTABLE -POSTHOOK: query: -- single level partition, sorted dynamic partition disabled -drop table acid +POSTHOOK: query: drop table acid POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE acid(key string, value string) PARTITIONED BY(ds string) CLUSTERED BY(key) INTO 2 BUCKETS STORED AS ORC TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE @@ -33,13 +31,11 @@ POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).key SIMPLE [(srcpart)srcpart.Fi POSTHOOK: Lineage: acid PARTITION(ds=2008-04-08).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), 
] POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: acid PARTITION(ds=2008-04-09).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- explicitly set statistics to avoid flakiness -alter table acid partition(ds='2008-04-08') update statistics set('numRows'='1600', 'rawDataSize'='18000') +PREHOOK: query: alter table acid partition(ds='2008-04-08') update statistics set('numRows'='1600', 'rawDataSize'='18000') PREHOOK: type: ALTERTABLE_UPDATEPARTSTATS PREHOOK: Input: default@acid PREHOOK: Output: default@acid@ds=2008-04-08 -POSTHOOK: query: -- explicitly set statistics to avoid flakiness -alter table acid partition(ds='2008-04-08') update statistics set('numRows'='1600', 'rawDataSize'='18000') +POSTHOOK: query: alter table acid partition(ds='2008-04-08') update statistics set('numRows'='1600', 'rawDataSize'='18000') POSTHOOK: type: ALTERTABLE_UPDATEPARTSTATS POSTHOOK: Input: default@acid POSTHOOK: Input: default@acid@ds=2008-04-08 @@ -280,13 +276,11 @@ POSTHOOK: Input: default@acid POSTHOOK: Input: default@acid@ds=2008-04-08 #### A masked pattern was here #### 1000 -PREHOOK: query: -- single level partition, sorted dynamic partition enabled -drop table acid +PREHOOK: query: drop table acid PREHOOK: type: DROPTABLE PREHOOK: Input: default@acid PREHOOK: Output: default@acid -POSTHOOK: query: -- single level partition, sorted dynamic partition enabled -drop table acid +POSTHOOK: query: drop table acid POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@acid POSTHOOK: Output: default@acid @@ -563,13 +557,11 @@ POSTHOOK: Input: default@acid POSTHOOK: Input: default@acid@ds=2008-04-08 #### A masked pattern was here #### 1000 -PREHOOK: query: -- 2 level partition, sorted dynamic partition disabled -drop table acid +PREHOOK: query: drop table acid PREHOOK: type: DROPTABLE PREHOOK: Input: default@acid PREHOOK: Output: 
default@acid -POSTHOOK: query: -- 2 level partition, sorted dynamic partition disabled -drop table acid +POSTHOOK: query: drop table acid POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@acid POSTHOOK: Output: default@acid @@ -864,12 +856,10 @@ POSTHOOK: Input: default@acid POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 #### A masked pattern was here #### 500 -PREHOOK: query: -- test with bucketing column not in select list -explain +PREHOOK: query: explain delete from acid where value = 'bar' PREHOOK: type: QUERY -POSTHOOK: query: -- test with bucketing column not in select list -explain +POSTHOOK: query: explain delete from acid where value = 'bar' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -980,13 +970,11 @@ POSTHOOK: Input: default@acid@ds=2008-04-09/hr=11 POSTHOOK: Input: default@acid@ds=2008-04-09/hr=12 #### A masked pattern was here #### 2000 -PREHOOK: query: -- 2 level partition, sorted dynamic partition enabled -drop table acid +PREHOOK: query: drop table acid PREHOOK: type: DROPTABLE PREHOOK: Input: default@acid PREHOOK: Output: default@acid -POSTHOOK: query: -- 2 level partition, sorted dynamic partition enabled -drop table acid +POSTHOOK: query: drop table acid POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@acid POSTHOOK: Output: default@acid @@ -1280,12 +1268,10 @@ POSTHOOK: Input: default@acid POSTHOOK: Input: default@acid@ds=2008-04-08/hr=11 #### A masked pattern was here #### 500 -PREHOOK: query: -- test with bucketing column not in select list -explain +PREHOOK: query: explain delete from acid where value = 'bar' PREHOOK: type: QUERY -POSTHOOK: query: -- test with bucketing column not in select list -explain +POSTHOOK: query: explain delete from acid where value = 'bar' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1395,13 +1381,11 @@ POSTHOOK: Input: default@acid@ds=2008-04-09/hr=11 POSTHOOK: Input: default@acid@ds=2008-04-09/hr=12 #### A masked pattern was here #### 2000 -PREHOOK: query: -- 2 level partition, sorted dynamic partition 
enabled, constant propagation disabled -drop table acid +PREHOOK: query: drop table acid PREHOOK: type: DROPTABLE PREHOOK: Input: default@acid PREHOOK: Output: default@acid -POSTHOOK: query: -- 2 level partition, sorted dynamic partition enabled, constant propagation disabled -drop table acid +POSTHOOK: query: drop table acid POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@acid POSTHOOK: Output: default@acid diff --git a/ql/src/test/results/clientpositive/llap/explainuser_4.q.out b/ql/src/test/results/clientpositive/llap/explainuser_4.q.out index 4084206..b0df3a0 100644 --- a/ql/src/test/results/clientpositive/llap/explainuser_4.q.out +++ b/ql/src/test/results/clientpositive/llap/explainuser_4.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- First try with regular mergejoin -explain +PREHOOK: query: explain select * from alltypesorc a join alltypesorc b on a.cint = b.cint @@ -7,8 +6,7 @@ where a.cint between 1000000 and 3000000 and b.cbigint is not null order by a.cint PREHOOK: type: QUERY -POSTHOOK: query: -- First try with regular mergejoin -explain +POSTHOOK: query: explain select * from alltypesorc a join alltypesorc b on a.cint = b.cint @@ -239,8 +237,7 @@ POSTHOOK: Input: default@alltypesorc -3799 1 10782 1 NULL 6 -PREHOOK: query: -- Try with dynamically partitioned hashjoin -explain +PREHOOK: query: explain select * from alltypesorc a join alltypesorc b on a.cint = b.cint @@ -248,8 +245,7 @@ where a.cint between 1000000 and 3000000 and b.cbigint is not null order by a.cint PREHOOK: type: QUERY -POSTHOOK: query: -- Try with dynamically partitioned hashjoin -explain +POSTHOOK: query: explain select * from alltypesorc a join alltypesorc b on a.cint = b.cint @@ -481,14 +477,12 @@ POSTHOOK: Input: default@alltypesorc 10782 1 NULL 6 Warning: Shuffle Join MERGEJOIN[9][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: -- Left outer join with residual -explain +PREHOOK: query: explain select * from alltypesorc a left outer join 
alltypesorc b on a.cint = b.cint or a.csmallint between 1 and 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Left outer join with residual -explain +POSTHOOK: query: explain select * from alltypesorc a left outer join alltypesorc b on a.cint = b.cint or a.csmallint between 1 and 10 diff --git a/ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out index 284ffb9..45a919a 100644 --- a/ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out +++ b/ql/src/test/results/clientpositive/llap/filter_join_breaktask.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@filter_join_breaktask -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@filter_join_breaktask diff --git a/ql/src/test/results/clientpositive/llap/filter_join_breaktask2.q.out b/ql/src/test/results/clientpositive/llap/filter_join_breaktask2.q.out index af85af9..6a2396e 100644 --- a/ql/src/test/results/clientpositive/llap/filter_join_breaktask2.q.out +++ b/ql/src/test/results/clientpositive/llap/filter_join_breaktask2.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +PREHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: 
Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +POSTHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/llap/groupby1.q.out b/ql/src/test/results/clientpositive/llap/groupby1.q.out index ba0a09a..0eecbb6 100644 --- a/ql/src/test/results/clientpositive/llap/groupby1.q.out +++ b/ql/src/test/results/clientpositive/llap/groupby1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g1 diff --git a/ql/src/test/results/clientpositive/llap/groupby2.q.out b/ql/src/test/results/clientpositive/llap/groupby2.q.out index 8286589..29b85d1 100644 --- a/ql/src/test/results/clientpositive/llap/groupby2.q.out +++ b/ql/src/test/results/clientpositive/llap/groupby2.q.out @@ -111,15 +111,11 @@ POSTHOOK: Output: default@dest_g2 POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest_g2.* FROM 
dest_g2 +PREHOOK: query: SELECT dest_g2.* FROM dest_g2 PREHOOK: type: QUERY PREHOOK: Input: default@dest_g2 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest_g2.* FROM dest_g2 +POSTHOOK: query: SELECT dest_g2.* FROM dest_g2 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_g2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/groupby_grouping_id2.q.out b/ql/src/test/results/clientpositive/llap/groupby_grouping_id2.q.out index 20f2fb9..9c85852 100644 --- a/ql/src/test/results/clientpositive/llap/groupby_grouping_id2.q.out +++ b/ql/src/test/results/clientpositive/llap/groupby_grouping_id2.q.out @@ -14,15 +14,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP +PREHOOK: query: SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP +POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/groupby_resolution.q.out b/ql/src/test/results/clientpositive/llap/groupby_resolution.q.out index 53e52ee..f2a6ab0 100644 --- a/ql/src/test/results/clientpositive/llap/groupby_resolution.q.out +++ b/ql/src/test/results/clientpositive/llap/groupby_resolution.q.out @@ -530,8 +530,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- windowing after group by -select key, count(*), rank() over(order by count(*)) +PREHOOK: query: select key, count(*), 
rank() over(order by count(*)) from src b where key < '12' group by b.key @@ -539,8 +538,7 @@ order by b.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- windowing after group by -select key, count(*), rank() over(order by count(*)) +POSTHOOK: query: select key, count(*), rank() over(order by count(*)) from src b where key < '12' group by b.key @@ -561,8 +559,7 @@ POSTHOOK: Input: default@src 116 1 1 118 2 7 119 3 12 -PREHOOK: query: -- having after group by -select key, count(*) +PREHOOK: query: select key, count(*) from src b group by b.key having key < '12' @@ -570,8 +567,7 @@ order by b.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- having after group by -select key, count(*) +POSTHOOK: query: select key, count(*) from src b group by b.key having key < '12' @@ -592,8 +588,7 @@ POSTHOOK: Input: default@src 116 1 118 2 119 3 -PREHOOK: query: -- having and windowing -select key, count(*), rank() over(order by count(*)) +PREHOOK: query: select key, count(*), rank() over(order by count(*)) from src b group by b.key having key < '12' @@ -601,8 +596,7 @@ order by b.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- having and windowing -select key, count(*), rank() over(order by count(*)) +POSTHOOK: query: select key, count(*), rank() over(order by count(*)) from src b group by b.key having key < '12' @@ -747,8 +741,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- order by -select key +PREHOOK: query: select key from src t where key < '12' group by t.key @@ -756,8 +749,7 @@ order by t.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- order by -select key +POSTHOOK: query: select key from src t where key < '12' group by t.key @@ -778,12 +770,10 @@ POSTHOOK: Input: default@src 116 118 119 -PREHOOK: query: -- cluster 
by -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.key, x.value as key FROM SRC x CLUSTER BY key PREHOOK: type: QUERY -POSTHOOK: query: -- cluster by -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.key, x.value as key FROM SRC x CLUSTER BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/llap/having.q.out b/ql/src/test/results/clientpositive/llap/having.q.out index 9438910..267254c 100644 --- a/ql/src/test/results/clientpositive/llap/having.q.out +++ b/ql/src/test/results/clientpositive/llap/having.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +PREHOOK: query: EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS -EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +POSTHOOK: query: EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out index 8ec11eb..7d518c7 100644 --- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out +++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_1.q.out @@ -1,28 +1,13 @@ -PREHOOK: query: -- Hybrid Grace Hash Join --- Test basic functionalities: --- 1. Various cases when hash partitions spill --- 2. Partitioned table spilling --- 3. Vectorization - - -SELECT 1 +PREHOOK: query: SELECT 1 PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- Hybrid Grace Hash Join --- Test basic functionalities: --- 1. Various cases when hash partitions spill --- 2. Partitioned table spilling --- 3. 
Vectorization - - -SELECT 1 +POSTHOOK: query: SELECT 1 POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1 -PREHOOK: query: -- Base result for inner join -explain +PREHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c @@ -30,8 +15,7 @@ select count(*) from on cd.cint = c.cint where c.cint < 2000000000) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- Base result for inner join -explain +POSTHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c @@ -144,9 +128,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 3152013 -PREHOOK: query: -- Two partitions are created. One in memory, one on disk on creation. --- The one in memory will eventually exceed memory limit, but won't spill. -explain +PREHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c @@ -154,9 +136,7 @@ select count(*) from on cd.cint = c.cint where c.cint < 2000000000) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- Two partitions are created. One in memory, one on disk on creation. --- The one in memory will eventually exceed memory limit, but won't spill. -explain +POSTHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c @@ -269,16 +249,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 3152013 -PREHOOK: query: -- Base result for inner join -explain +PREHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c inner join alltypesorc cd on cd.cint = c.cint) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- Base result for inner join -explain +POSTHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c @@ -388,18 +366,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 3152013 -PREHOOK: query: -- 16 partitions are created: 3 in memory, 13 on disk on creation. 
--- 1 partition is spilled during first round processing, which ends up having 2 in memory, 14 on disk -explain +PREHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c inner join alltypesorc cd on cd.cint = c.cint) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- 16 partitions are created: 3 in memory, 13 on disk on creation. --- 1 partition is spilled during first round processing, which ends up having 2 in memory, 14 on disk -explain +POSTHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c @@ -509,16 +483,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 3152013 -PREHOOK: query: -- Base result for outer join -explain +PREHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c left outer join alltypesorc cd on cd.cint = c.cint) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- Base result for outer join -explain +POSTHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c @@ -622,16 +594,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 3155128 -PREHOOK: query: -- 32 partitions are created. 3 in memory, 29 on disk on creation. -explain +PREHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c left outer join alltypesorc cd on cd.cint = c.cint) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- 32 partitions are created. 3 in memory, 29 on disk on creation. 
-explain +POSTHOOK: query: explain select count(*) from (select c.ctinyint from alltypesorc c @@ -735,13 +705,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 3155128 -PREHOOK: query: -- Partitioned table -create table parttbl (key string, value char(20)) partitioned by (dt char(10)) +PREHOOK: query: create table parttbl (key string, value char(20)) partitioned by (dt char(10)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parttbl -POSTHOOK: query: -- Partitioned table -create table parttbl (key string, value char(20)) partitioned by (dt char(10)) +POSTHOOK: query: create table parttbl (key string, value char(20)) partitioned by (dt char(10)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@parttbl @@ -769,16 +737,14 @@ POSTHOOK: Input: default@src1 POSTHOOK: Output: default@parttbl@dt=2000-01-02 POSTHOOK: Lineage: parttbl PARTITION(dt=2000-01-02).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: parttbl PARTITION(dt=2000-01-02).value EXPRESSION [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- No spill, base result -explain +PREHOOK: query: explain select count(*) from (select p1.value from parttbl p1 inner join parttbl p2 on p1.key = p2.key) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- No spill, base result -explain +POSTHOOK: query: explain select count(*) from (select p1.value from parttbl p1 @@ -892,16 +858,14 @@ POSTHOOK: Input: default@parttbl@dt=2000-01-01 POSTHOOK: Input: default@parttbl@dt=2000-01-02 #### A masked pattern was here #### 1217 -PREHOOK: query: -- No spill, 2 partitions created in memory -explain +PREHOOK: query: explain select count(*) from (select p1.value from parttbl p1 inner join parttbl p2 on p1.key = p2.key) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- No spill, 2 partitions created in memory -explain +POSTHOOK: query: 
explain select count(*) from (select p1.value from parttbl p1 @@ -1016,16 +980,14 @@ POSTHOOK: Input: default@parttbl@dt=2000-01-01 POSTHOOK: Input: default@parttbl@dt=2000-01-02 #### A masked pattern was here #### 1217 -PREHOOK: query: -- Spill case base result -explain +PREHOOK: query: explain select count(*) from (select p1.value from parttbl p1 inner join parttbl p2 on p1.key = p2.key) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- Spill case base result -explain +POSTHOOK: query: explain select count(*) from (select p1.value from parttbl p1 @@ -1139,16 +1101,14 @@ POSTHOOK: Input: default@parttbl@dt=2000-01-01 POSTHOOK: Input: default@parttbl@dt=2000-01-02 #### A masked pattern was here #### 1217 -PREHOOK: query: -- Spill case, one partition in memory, one spilled on creation -explain +PREHOOK: query: explain select count(*) from (select p1.value from parttbl p1 inner join parttbl p2 on p1.key = p2.key) t1 PREHOOK: type: QUERY -POSTHOOK: query: -- Spill case, one partition in memory, one spilled on creation -explain +POSTHOOK: query: explain select count(*) from (select p1.value from parttbl p1 @@ -1271,9 +1231,7 @@ POSTHOOK: query: drop table parttbl POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@parttbl POSTHOOK: Output: default@parttbl -PREHOOK: query: -- Test vectorization --- Test case borrowed from vector_decimal_mapjoin.q -CREATE TABLE decimal_mapjoin STORED AS ORC AS +PREHOOK: query: CREATE TABLE decimal_mapjoin STORED AS ORC AS SELECT cdouble, CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, cint @@ -1282,9 +1240,7 @@ PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@decimal_mapjoin -POSTHOOK: query: -- Test vectorization --- Test case borrowed from vector_decimal_mapjoin.q -CREATE TABLE decimal_mapjoin STORED AS ORC AS +POSTHOOK: query: CREATE TABLE decimal_mapjoin STORED AS ORC AS SELECT cdouble, 
CAST (((cdouble*22.1)/37) AS DECIMAL(20,10)) AS cdecimal1, CAST (((cdouble*9.3)/13) AS DECIMAL(23,14)) AS cdecimal2, cint diff --git a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out index de81828..1ad2f9e 100644 --- a/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out +++ b/ql/src/test/results/clientpositive/llap/hybridgrace_hashjoin_2.q.out @@ -1,23 +1,17 @@ -PREHOOK: query: -- Hybrid Grace Hash Join --- Test n-way join -SELECT 1 +PREHOOK: query: SELECT 1 PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- Hybrid Grace Hash Join --- Test n-way join -SELECT 1 +POSTHOOK: query: SELECT 1 POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1 -PREHOOK: query: -- 3-way mapjoin (1 big table, 2 small tables) -SELECT 1 +PREHOOK: query: SELECT 1 PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- 3-way mapjoin (1 big table, 2 small tables) -SELECT 1 +POSTHOOK: query: SELECT 1 POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### @@ -284,13 +278,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 428 -PREHOOK: query: -- 4-way mapjoin (1 big table, 3 small tables) -SELECT 1 +PREHOOK: query: SELECT 1 PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- 4-way mapjoin (1 big table, 3 small tables) -SELECT 1 +POSTHOOK: query: SELECT 1 POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### @@ -601,13 +593,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A 
masked pattern was here #### 5680 -PREHOOK: query: -- 2 sets of 3-way mapjoin under 2 different tasks -SELECT 1 +PREHOOK: query: SELECT 1 PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- 2 sets of 3-way mapjoin under 2 different tasks -SELECT 1 +POSTHOOK: query: SELECT 1 POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### @@ -1112,13 +1102,11 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 428 452 -PREHOOK: query: -- A chain of 2 sets of 3-way mapjoin under the same task -SELECT 1 +PREHOOK: query: SELECT 1 PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- A chain of 2 sets of 3-way mapjoin under the same task -SELECT 1 +POSTHOOK: query: SELECT 1 POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/infer_bucket_sort_bucketed_table.q.out b/ql/src/test/results/clientpositive/llap/infer_bucket_sort_bucketed_table.q.out index 33d795b..1efb81b 100644 --- a/ql/src/test/results/clientpositive/llap/infer_bucket_sort_bucketed_table.q.out +++ b/ql/src/test/results/clientpositive/llap/infer_bucket_sort_bucketed_table.q.out @@ -1,27 +1,19 @@ -PREHOOK: query: -- Test writing to a bucketed table, the output should be bucketed by the bucketing key into the --- a number of files equal to the number of buckets -CREATE TABLE test_table_bucketed (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table_bucketed (key STRING, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (value) SORTED BY (value) INTO 3 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table_bucketed -POSTHOOK: query: -- Test writing to a bucketed table, the output should be bucketed 
by the bucketing key into the --- a number of files equal to the number of buckets -CREATE TABLE test_table_bucketed (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE test_table_bucketed (key STRING, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (value) SORTED BY (value) INTO 3 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table_bucketed -PREHOOK: query: -- Despite the fact that normally inferring would say this table is bucketed and sorted on key, --- this should be bucketed and sorted by value into 3 buckets -INSERT OVERWRITE TABLE test_table_bucketed PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table_bucketed PARTITION (part = '1') SELECT key, count(1) FROM src GROUP BY KEY PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table_bucketed@part=1 -POSTHOOK: query: -- Despite the fact that normally inferring would say this table is bucketed and sorted on key, --- this should be bucketed and sorted by value into 3 buckets -INSERT OVERWRITE TABLE test_table_bucketed PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table_bucketed PARTITION (part = '1') SELECT key, count(1) FROM src GROUP BY KEY POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -67,16 +59,12 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, the table is --- bucketed -SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 1 OUT OF 3) WHERE part = '1' +PREHOOK: query: SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 1 OUT OF 3) WHERE part = '1' PREHOOK: type: QUERY PREHOOK: Input: default@test_table_bucketed PREHOOK: Input: default@test_table_bucketed@part=1 #### A masked pattern was here #### -POSTHOOK: query: -- If the count(*) from sampling the 
buckets matches the count(*) from each file, the table is --- bucketed -SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 1 OUT OF 3) WHERE part = '1' +POSTHOOK: query: SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 1 OUT OF 3) WHERE part = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table_bucketed POSTHOOK: Input: default@test_table_bucketed@part=1 diff --git a/ql/src/test/results/clientpositive/llap/input16_cc.q.out b/ql/src/test/results/clientpositive/llap/input16_cc.q.out index 9ea3472..1b61e16 100644 --- a/ql/src/test/results/clientpositive/llap/input16_cc.q.out +++ b/ql/src/test/results/clientpositive/llap/input16_cc.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- TestSerDe is a user defined serde where the default delimiter is Ctrl-B --- the user is overwriting it with ctrlC - -DROP TABLE INPUT16_CC +PREHOOK: query: DROP TABLE INPUT16_CC PREHOOK: type: DROPTABLE -POSTHOOK: query: -- TestSerDe is a user defined serde where the default delimiter is Ctrl-B --- the user is overwriting it with ctrlC - -DROP TABLE INPUT16_CC +POSTHOOK: query: DROP TABLE INPUT16_CC POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE INPUT16_CC(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties ('testserde.default.serialization.format'='\003', 'dummy.prop.not.used'='dummyy.val') STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/llap/insert1.q.out b/ql/src/test/results/clientpositive/llap/insert1.q.out index 4b30156..aa09585 100644 --- a/ql/src/test/results/clientpositive/llap/insert1.q.out +++ b/ql/src/test/results/clientpositive/llap/insert1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table insert1(key int, value string) stored as textfile +PREHOOK: query: create table insert1(key int, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@insert1 -POSTHOOK: 
query: -- SORT_QUERY_RESULTS - -create table insert1(key int, value string) stored as textfile +POSTHOOK: query: create table insert1(key int, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@insert1 @@ -136,12 +132,10 @@ STAGE PLANS: Stage: Stage-3 Stats-Aggr Operator -PREHOOK: query: -- HIVE-3465 -create database x +PREHOOK: query: create database x PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:x -POSTHOOK: query: -- HIVE-3465 -create database x +POSTHOOK: query: create database x POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:x PREHOOK: query: create table x.insert1(key int, value string) stored as textfile @@ -350,12 +344,10 @@ STAGE PLANS: Stage: Stage-5 Stats-Aggr Operator -PREHOOK: query: -- HIVE-3676 -CREATE DATABASE db2 +PREHOOK: query: CREATE DATABASE db2 PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:db2 -POSTHOOK: query: -- HIVE-3676 -CREATE DATABASE db2 +POSTHOOK: query: CREATE DATABASE db2 POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:db2 PREHOOK: query: USE db2 diff --git a/ql/src/test/results/clientpositive/llap/insert_dir_distcp.q.out b/ql/src/test/results/clientpositive/llap/insert_dir_distcp.q.out index b70fa01..fccbd63 100644 --- a/ql/src/test/results/clientpositive/llap/insert_dir_distcp.q.out +++ b/ql/src/test/results/clientpositive/llap/insert_dir_distcp.q.out @@ -1,12 +1,7 @@ -PREHOOK: query: -- see TEZ-2931 for using INFO logging - #### A masked pattern was here #### PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- see TEZ-2931 for using INFO logging - -#### A masked pattern was here #### POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/insert_into_with_schema.q.out b/ql/src/test/results/clientpositive/llap/insert_into_with_schema.q.out index 541d2d4..89c192e 100644 --- 
a/ql/src/test/results/clientpositive/llap/insert_into_with_schema.q.out +++ b/ql/src/test/results/clientpositive/llap/insert_into_with_schema.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- set of tests HIVE-9481 - -drop database if exists x314 cascade +PREHOOK: query: drop database if exists x314 cascade PREHOOK: type: DROPDATABASE -POSTHOOK: query: -- set of tests HIVE-9481 - -drop database if exists x314 cascade +POSTHOOK: query: drop database if exists x314 cascade POSTHOOK: type: DROPDATABASE PREHOOK: query: create database x314 PREHOOK: type: CREATEDATABASE @@ -58,13 +54,11 @@ POSTHOOK: type: QUERY POSTHOOK: Output: x314@source POSTHOOK: Lineage: source.s1 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ] POSTHOOK: Lineage: source.s2 EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] -PREHOOK: query: -- expect source to contain 1 row (1,2) -select * from source +PREHOOK: query: select * from source PREHOOK: type: QUERY PREHOOK: Input: x314@source #### A masked pattern was here #### -POSTHOOK: query: -- expect source to contain 1 row (1,2) -select * from source +POSTHOOK: query: select * from source POSTHOOK: type: QUERY POSTHOOK: Input: x314@source #### A masked pattern was here #### @@ -80,25 +74,21 @@ POSTHOOK: Output: x314@target1 POSTHOOK: Lineage: target1.x SIMPLE [(source)source.FieldSchema(name:s2, type:int, comment:null), ] POSTHOOK: Lineage: target1.y SIMPLE [] POSTHOOK: Lineage: target1.z SIMPLE [(source)source.FieldSchema(name:s1, type:int, comment:null), ] -PREHOOK: query: -- expect target1 to contain 1 row (2,NULL,1) -select * from target1 +PREHOOK: query: select * from target1 PREHOOK: type: QUERY PREHOOK: Input: x314@target1 #### A masked pattern was here #### -POSTHOOK: query: -- expect target1 to contain 1 row (2,NULL,1) -select * from target1 +POSTHOOK: query: select * from target1 POSTHOOK: type: QUERY POSTHOOK: 
Input: x314@target1 #### A masked pattern was here #### 2 NULL 1 -PREHOOK: query: -- note that schema spec for target1 and target2 are different -from source insert into target1(x,y) select * insert into target2(x,z) select s2,s1 +PREHOOK: query: from source insert into target1(x,y) select * insert into target2(x,z) select s2,s1 PREHOOK: type: QUERY PREHOOK: Input: x314@source PREHOOK: Output: x314@target1 PREHOOK: Output: x314@target2 -POSTHOOK: query: -- note that schema spec for target1 and target2 are different -from source insert into target1(x,y) select * insert into target2(x,z) select s2,s1 +POSTHOOK: query: from source insert into target1(x,y) select * insert into target2(x,z) select s2,s1 POSTHOOK: type: QUERY POSTHOOK: Input: x314@source POSTHOOK: Output: x314@target1 @@ -109,25 +99,21 @@ POSTHOOK: Lineage: target1.z SIMPLE [] POSTHOOK: Lineage: target2.x SIMPLE [(source)source.FieldSchema(name:s2, type:int, comment:null), ] POSTHOOK: Lineage: target2.y SIMPLE [] POSTHOOK: Lineage: target2.z SIMPLE [(source)source.FieldSchema(name:s1, type:int, comment:null), ] -PREHOOK: query: --expect target1 to have 2rows (2,NULL,1), (1,2,NULL) -select * from target1 order by x,y,z +PREHOOK: query: select * from target1 order by x,y,z PREHOOK: type: QUERY PREHOOK: Input: x314@target1 #### A masked pattern was here #### -POSTHOOK: query: --expect target1 to have 2rows (2,NULL,1), (1,2,NULL) -select * from target1 order by x,y,z +POSTHOOK: query: select * from target1 order by x,y,z POSTHOOK: type: QUERY POSTHOOK: Input: x314@target1 #### A masked pattern was here #### 1 2 NULL 2 NULL 1 -PREHOOK: query: -- expect target2 to have 1 row: (2,NULL,1) -select * from target2 +PREHOOK: query: select * from target2 PREHOOK: type: QUERY PREHOOK: Input: x314@target2 #### A masked pattern was here #### -POSTHOOK: query: -- expect target2 to have 1 row: (2,NULL,1) -select * from target2 +POSTHOOK: query: select * from target2 POSTHOOK: type: QUERY POSTHOOK: Input: x314@target2 #### 
A masked pattern was here #### @@ -148,26 +134,22 @@ POSTHOOK: Lineage: target1.z SIMPLE [(source)source.FieldSchema(name:s2, type:in POSTHOOK: Lineage: target2.x EXPRESSION [] POSTHOOK: Lineage: target2.y SIMPLE [(source)source.FieldSchema(name:s1, type:int, comment:null), ] POSTHOOK: Lineage: target2.z SIMPLE [(source)source.FieldSchema(name:s2, type:int, comment:null), ] -PREHOOK: query: -- expect target1 to have 3 rows: (2,NULL,1), (1,2,NULL), (NULL, 1,2) -select * from target1 order by x,y,z +PREHOOK: query: select * from target1 order by x,y,z PREHOOK: type: QUERY PREHOOK: Input: x314@target1 #### A masked pattern was here #### -POSTHOOK: query: -- expect target1 to have 3 rows: (2,NULL,1), (1,2,NULL), (NULL, 1,2) -select * from target1 order by x,y,z +POSTHOOK: query: select * from target1 order by x,y,z POSTHOOK: type: QUERY POSTHOOK: Input: x314@target1 #### A masked pattern was here #### NULL 1 2 1 2 NULL 2 NULL 1 -PREHOOK: query: -- expect target2 to have 2 rows: (2,NULL,1), (NULL, 1,2) -select * from target2 order by x,y,z +PREHOOK: query: select * from target2 order by x,y,z PREHOOK: type: QUERY PREHOOK: Input: x314@target2 #### A masked pattern was here #### -POSTHOOK: query: -- expect target2 to have 2 rows: (2,NULL,1), (NULL, 1,2) -select * from target2 order by x,y,z +POSTHOOK: query: select * from target2 order by x,y,z POSTHOOK: type: QUERY POSTHOOK: Input: x314@target2 #### A masked pattern was here #### @@ -194,24 +176,20 @@ POSTHOOK: Output: x314@target3 POSTHOOK: Lineage: target3.x SIMPLE [(source)source.FieldSchema(name:s1, type:int, comment:null), ] POSTHOOK: Lineage: target3.y SIMPLE [] POSTHOOK: Lineage: target3.z SIMPLE [(source2)source2.FieldSchema(name:s2, type:int, comment:null), ] -PREHOOK: query: --expect target3 to have 1 row (1,NULL,NULL) -select * from target3 +PREHOOK: query: select * from target3 PREHOOK: type: QUERY PREHOOK: Input: x314@target3 #### A masked pattern was here #### -POSTHOOK: query: --expect target3 to have 1 
row (1,NULL,NULL) -select * from target3 +POSTHOOK: query: select * from target3 POSTHOOK: type: QUERY POSTHOOK: Input: x314@target3 #### A masked pattern was here #### 1 NULL NULL -PREHOOK: query: -- partitioned tables -CREATE TABLE pageviews (userid VARCHAR(64), link STRING, source STRING) PARTITIONED BY (datestamp STRING, i int) CLUSTERED BY (userid) INTO 4 BUCKETS STORED AS ORC +PREHOOK: query: CREATE TABLE pageviews (userid VARCHAR(64), link STRING, source STRING) PARTITIONED BY (datestamp STRING, i int) CLUSTERED BY (userid) INTO 4 BUCKETS STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:x314 PREHOOK: Output: x314@pageviews -POSTHOOK: query: -- partitioned tables -CREATE TABLE pageviews (userid VARCHAR(64), link STRING, source STRING) PARTITIONED BY (datestamp STRING, i int) CLUSTERED BY (userid) INTO 4 BUCKETS STORED AS ORC +POSTHOOK: query: CREATE TABLE pageviews (userid VARCHAR(64), link STRING, source STRING) PARTITIONED BY (datestamp STRING, i int) CLUSTERED BY (userid) INTO 4 BUCKETS STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:x314 POSTHOOK: Output: x314@pageviews @@ -224,31 +202,21 @@ POSTHOOK: Output: x314@pageviews@datestamp=2014-09-23/i=1 POSTHOOK: Lineage: pageviews PARTITION(datestamp=2014-09-23,i=1).link SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ] POSTHOOK: Lineage: pageviews PARTITION(datestamp=2014-09-23,i=1).source SIMPLE [] POSTHOOK: Lineage: pageviews PARTITION(datestamp=2014-09-23,i=1).userid EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] -PREHOOK: query: -- expect 1 row: ('jsmith', 'mail.com', NULL) in partition '2014-09-23'/'1' -select * from pageviews +PREHOOK: query: select * from pageviews PREHOOK: type: QUERY PREHOOK: Input: x314@pageviews PREHOOK: Input: x314@pageviews@datestamp=2014-09-23/i=1 #### A masked pattern was here #### -POSTHOOK: query: -- 
expect 1 row: ('jsmith', 'mail.com', NULL) in partition '2014-09-23'/'1' -select * from pageviews +POSTHOOK: query: select * from pageviews POSTHOOK: type: QUERY POSTHOOK: Input: x314@pageviews POSTHOOK: Input: x314@pageviews@datestamp=2014-09-23/i=1 #### A masked pattern was here #### jsmith mail.com NULL 2014-09-23 1 -PREHOOK: query: -- dynamic partitioning - - - -INSERT INTO TABLE pageviews PARTITION (datestamp='2014-09-23',i)(userid,i,link) VALUES ('jsmith', 7, '7mail.com') +PREHOOK: query: INSERT INTO TABLE pageviews PARTITION (datestamp='2014-09-23',i)(userid,i,link) VALUES ('jsmith', 7, '7mail.com') PREHOOK: type: QUERY PREHOOK: Output: x314@pageviews@datestamp=2014-09-23 -POSTHOOK: query: -- dynamic partitioning - - - -INSERT INTO TABLE pageviews PARTITION (datestamp='2014-09-23',i)(userid,i,link) VALUES ('jsmith', 7, '7mail.com') +POSTHOOK: query: INSERT INTO TABLE pageviews PARTITION (datestamp='2014-09-23',i)(userid,i,link) VALUES ('jsmith', 7, '7mail.com') POSTHOOK: type: QUERY POSTHOOK: Output: x314@pageviews@datestamp=2014-09-23/i=7 POSTHOOK: Lineage: pageviews PARTITION(datestamp=2014-09-23,i=7).link SIMPLE [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col3, type:string, comment:), ] @@ -272,24 +240,16 @@ POSTHOOK: Output: x314@pageviews@datestamp=2014-09-24/i=19 POSTHOOK: Lineage: pageviews PARTITION(datestamp=2014-09-24,i=19).link SIMPLE [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col3, type:string, comment:), ] POSTHOOK: Lineage: pageviews PARTITION(datestamp=2014-09-24,i=19).source SIMPLE [] POSTHOOK: Lineage: pageviews PARTITION(datestamp=2014-09-24,i=19).userid EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ] -PREHOOK: query: -- here the 'datestamp' partition column is not provided and will be NULL-filled -INSERT INTO TABLE pageviews PARTITION (datestamp,i)(userid,i,link) VALUES ('jsmith', 23, '23mail.com') +PREHOOK: 
query: INSERT INTO TABLE pageviews PARTITION (datestamp,i)(userid,i,link) VALUES ('jsmith', 23, '23mail.com') PREHOOK: type: QUERY PREHOOK: Output: x314@pageviews -POSTHOOK: query: -- here the 'datestamp' partition column is not provided and will be NULL-filled -INSERT INTO TABLE pageviews PARTITION (datestamp,i)(userid,i,link) VALUES ('jsmith', 23, '23mail.com') +POSTHOOK: query: INSERT INTO TABLE pageviews PARTITION (datestamp,i)(userid,i,link) VALUES ('jsmith', 23, '23mail.com') POSTHOOK: type: QUERY POSTHOOK: Output: x314@pageviews@datestamp=__HIVE_DEFAULT_PARTITION__/i=23 POSTHOOK: Lineage: pageviews PARTITION(datestamp=__HIVE_DEFAULT_PARTITION__,i=23).link SIMPLE [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col3, type:string, comment:), ] POSTHOOK: Lineage: pageviews PARTITION(datestamp=__HIVE_DEFAULT_PARTITION__,i=23).source SIMPLE [] POSTHOOK: Lineage: pageviews PARTITION(datestamp=__HIVE_DEFAULT_PARTITION__,i=23).userid EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ] -PREHOOK: query: -- expect 5 rows: --- expect ('jsmith', 'mail.com', NULL) in partition '2014-09-23'/'1' --- expect ('jsmith', '7mail.com', NULL) in partition '2014-09-23'/'7' --- expect ('jsmith', '17mail.com', NULL) in partition '2014-09-23'/'17' --- expect ('jsmith', '19mail.com', NULL) in partition '2014-09-24'/'19' --- expect ('jsmith', '23mail.com', NULL) in partition '__HIVE_DEFAULT_PARTITION__'/'23' -select * from pageviews order by link +PREHOOK: query: select * from pageviews order by link PREHOOK: type: QUERY PREHOOK: Input: x314@pageviews PREHOOK: Input: x314@pageviews@datestamp=2014-09-23/i=1 @@ -298,13 +258,7 @@ PREHOOK: Input: x314@pageviews@datestamp=2014-09-23/i=7 PREHOOK: Input: x314@pageviews@datestamp=2014-09-24/i=19 PREHOOK: Input: x314@pageviews@datestamp=__HIVE_DEFAULT_PARTITION__/i=23 #### A masked pattern was here #### -POSTHOOK: query: -- expect 5 rows: --- expect 
('jsmith', 'mail.com', NULL) in partition '2014-09-23'/'1' --- expect ('jsmith', '7mail.com', NULL) in partition '2014-09-23'/'7' --- expect ('jsmith', '17mail.com', NULL) in partition '2014-09-23'/'17' --- expect ('jsmith', '19mail.com', NULL) in partition '2014-09-24'/'19' --- expect ('jsmith', '23mail.com', NULL) in partition '__HIVE_DEFAULT_PARTITION__'/'23' -select * from pageviews order by link +POSTHOOK: query: select * from pageviews order by link POSTHOOK: type: QUERY POSTHOOK: Input: x314@pageviews POSTHOOK: Input: x314@pageviews@datestamp=2014-09-23/i=1 diff --git a/ql/src/test/results/clientpositive/llap/join0.q.out b/ql/src/test/results/clientpositive/llap/join0.q.out index 82440e7..842b63e 100644 --- a/ql/src/test/results/clientpositive/llap/join0.q.out +++ b/ql/src/test/results/clientpositive/llap/join0.q.out @@ -1,7 +1,5 @@ Warning: Shuffle Join MERGEJOIN[15][tables = [src1, src2]] in Stage 'Reducer 2' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 @@ -9,9 +7,7 @@ SELECT src1.key as k1, src1.value as v1, (SELECT * FROM src WHERE src.key < 10) src2 SORT BY k1, v1, k2, v2 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 diff --git a/ql/src/test/results/clientpositive/llap/join1.q.out b/ql/src/test/results/clientpositive/llap/join1.q.out index cb0a735..d79a405 100644 --- a/ql/src/test/results/clientpositive/llap/join1.q.out +++ b/ql/src/test/results/clientpositive/llap/join1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: 
Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out b/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out index 46d6281..15eb751 100644 --- a/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out +++ b/ql/src/test/results/clientpositive/llap/join32_lessSize.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 @@ -18,17 +14,13 @@ POSTHOOK: query: CREATE TABLE dest_j2(key STRING, value STRING, val2 STRING) STO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j2 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT 
OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) diff --git a/ql/src/test/results/clientpositive/load_binary_data.q.out b/ql/src/test/results/clientpositive/load_binary_data.q.out index fb7f42b..c6d4e61 100644 --- a/ql/src/test/results/clientpositive/load_binary_data.q.out +++ b/ql/src/test/results/clientpositive/load_binary_data.q.out @@ -12,15 +12,11 @@ STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@mytable -PREHOOK: query: -- this query loads native binary data, stores in a table and then queries it. Note that string.txt contains binary data. Also uses transform clause and then length udf. - -LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@mytable -POSTHOOK: query: -- this query loads native binary data, stores in a table and then queries it. Note that string.txt contains binary data. Also uses transform clause and then length udf. 
- -LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/string.txt' INTO TABLE mytable POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@mytable diff --git a/ql/src/test/results/clientpositive/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/load_dyn_part1.q.out index e7dace5..84d806d 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part1.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part1.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/load_dyn_part10.q.out b/ql/src/test/results/clientpositive/load_dyn_part10.q.out index ca388b9..99d3572 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part10.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part10.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/load_dyn_part14.q.out index 7ccb9c3..a6a5c63 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part14.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part14.q.out @@ -1,19 +1,9 @@ -PREHOOK: query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - 
--- SORT_QUERY_RESULTS - -create table if not exists nzhang_part14 (key string) +PREHOOK: query: create table if not exists nzhang_part14 (key string) partitioned by (value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_part14 -POSTHOOK: query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - --- SORT_QUERY_RESULTS - -create table if not exists nzhang_part14 (key string) +POSTHOOK: query: create table if not exists nzhang_part14 (key string) partitioned by (value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/load_dyn_part2.q.out b/ql/src/test/results/clientpositive/load_dyn_part2.q.out index 0c5fe6b..93778a2 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part2.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part2.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table if not exists nzhang_part_bucket (key string, value string) +PREHOOK: query: create table if not exists nzhang_part_bucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_part_bucket -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table if not exists nzhang_part_bucket (key string, value string) +POSTHOOK: query: create table if not exists nzhang_part_bucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/load_dyn_part3.q.out index 3242c3d..3849100 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part3.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part3.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart 
+PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/load_dyn_part4.q.out b/ql/src/test/results/clientpositive/load_dyn_part4.q.out index d24875f..40b0bbb 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part4.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part4.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/load_dyn_part8.q.out index a8247f5..699c85d 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part8.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part8.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/load_dyn_part9.q.out b/ql/src/test/results/clientpositive/load_dyn_part9.q.out index 300f41e..414e784 100644 --- a/ql/src/test/results/clientpositive/load_dyn_part9.q.out +++ b/ql/src/test/results/clientpositive/load_dyn_part9.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: 
show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/load_file_with_space_in_the_name.q.out b/ql/src/test/results/clientpositive/load_file_with_space_in_the_name.q.out index 2072651..78dee61 100644 --- a/ql/src/test/results/clientpositive/load_file_with_space_in_the_name.q.out +++ b/ql/src/test/results/clientpositive/load_file_with_space_in_the_name.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- test for loading into tables with the file with space in the name - - -CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT) +PREHOOK: query: CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@load_file_with_space_in_the_name -POSTHOOK: query: -- test for loading into tables with the file with space in the name - - -CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT) +POSTHOOK: query: CREATE TABLE load_file_with_space_in_the_name(name STRING, age INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@load_file_with_space_in_the_name diff --git a/ql/src/test/results/clientpositive/load_fs_overwrite.q.out b/ql/src/test/results/clientpositive/load_fs_overwrite.q.out index cafcdde..8213728 100644 --- a/ql/src/test/results/clientpositive/load_fs_overwrite.q.out +++ b/ql/src/test/results/clientpositive/load_fs_overwrite.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: --HIVE 6209 - -drop table target +PREHOOK: query: drop table target PREHOOK: type: DROPTABLE -POSTHOOK: query: --HIVE 6209 - -drop table target +POSTHOOK: query: drop table target POSTHOOK: type: DROPTABLE PREHOOK: query: drop table temp PREHOOK: type: DROPTABLE diff --git 
a/ql/src/test/results/clientpositive/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/louter_join_ppr.q.out index c1319f8..d652f23 100644 --- a/ql/src/test/results/clientpositive/louter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/louter_join_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM src a LEFT OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN EXTENDED SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM src a LEFT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/mapjoin1.q.out b/ql/src/test/results/clientpositive/mapjoin1.q.out index ecd4ce0..5601bc0 100644 --- a/ql/src/test/results/clientpositive/mapjoin1.q.out +++ b/ql/src/test/results/clientpositive/mapjoin1.q.out @@ -21,12 +21,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 1114788.0 -PREHOOK: query: -- const filter on outer join -EXPLAIN +PREHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN src b on a.key=b.key AND true limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- const filter on outer join -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN src b on a.key=b.key AND true limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -112,12 +110,10 @@ POSTHOOK: Input: default@src 165 val_165 165 val_165 165 val_165 165 val_165 409 val_409 409 val_409 -PREHOOK: query: -- func filter on outer join -EXPLAIN +PREHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN src b on a.key=b.key AND b.key * 10 < '1000' limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- func filter on outer join -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN 
src b on a.key=b.key AND b.key * 10 < '1000' limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -203,13 +199,11 @@ NULL NULL 255 val_255 NULL NULL 278 val_278 98 val_98 98 val_98 98 val_98 98 val_98 -PREHOOK: query: -- field filter on outer join -EXPLAIN +PREHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN (select key, named_struct('key', key, 'value', value) as kv from src) b on a.key=b.key AND b.kv.key > 200 limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- field filter on outer join -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN (select key, named_struct('key', key, 'value', value) as kv from src) b on a.key=b.key AND b.kv.key > 200 limit 10 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out b/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out index b201e19..a6f32e9 100644 --- a/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition - -SELECT * FROM src1 +PREHOOK: query: SELECT * FROM src1 RIGHT OUTER JOIN src1 src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) JOIN src src3 ON (src2.key = src3.key AND src3.key < 300) SORT BY src1.key, src2.key, src3.key @@ -10,11 +6,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition - -SELECT * FROM src1 +POSTHOOK: query: SELECT * FROM src1 RIGHT OUTER JOIN src1 src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) JOIN src src3 ON (src2.key = src3.key AND src3.key < 300) SORT BY src1.key, src2.key, src3.key diff --git 
a/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out index 9d5dc25..150e9f0 100644 --- a/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out @@ -1,14 +1,6 @@ -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - --- SORT_QUERY_RESULTS - -explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) +PREHOOK: query: explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - --- SORT_QUERY_RESULTS - -explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) +POSTHOOK: query: explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-7 is a root stage diff --git a/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out b/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out index 85882fa..074dfc8 100644 --- a/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table src0 like src +PREHOOK: query: create table src0 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src0 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table src0 like src +POSTHOOK: query: create table src0 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src0 diff --git a/ql/src/test/results/clientpositive/mapjoin_subquery.q.out 
b/ql/src/test/results/clientpositive/mapjoin_subquery.q.out index 9640179..1a44d18 100644 --- a/ql/src/test/results/clientpositive/mapjoin_subquery.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_subquery.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 FROM src1 x JOIN src y ON (x.key = y.key)) subq JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 diff --git a/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out b/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out index 78819a3..beace92 100644 --- a/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out @@ -64,18 +64,14 @@ POSTHOOK: query: load data local inpath '../../data/files/z.txt' INTO TABLE z POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@z -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT subq.key1, subq.value1, subq.key2, subq.value2, z.id, z.name FROM (SELECT x.id as key1, x.name as value1, y.id as key2, y.name as value2 FROM y JOIN x ON (x.id = y.id)) subq JOIN z ON (subq.key1 = z.id) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT subq.key1, subq.value1, subq.key2, subq.value2, z.id, z.name FROM (SELECT x.id as key1, x.name as value1, y.id as 
key2, y.name as value2 diff --git a/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out b/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out index fe76be1..60a1bfd 100644 --- a/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition - -create table dest_1 (key STRING, value STRING) stored as textfile +PREHOOK: query: create table dest_1 (key STRING, value STRING) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition - -create table dest_1 (key STRING, value STRING) stored as textfile +POSTHOOK: query: create table dest_1 (key STRING, value STRING) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_1 diff --git a/ql/src/test/results/clientpositive/masking_4.q.out b/ql/src/test/results/clientpositive/masking_4.q.out index 0a81c44..9ddba3a 100644 --- a/ql/src/test/results/clientpositive/masking_4.q.out +++ b/ql/src/test/results/clientpositive/masking_4.q.out @@ -64,15 +64,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: --should mask masking_test - -explain +PREHOOK: query: explain with q1 as ( select * from masking_test where key = '5') select * from q1 PREHOOK: type: QUERY -POSTHOOK: query: --should mask masking_test - -explain +POSTHOOK: query: explain with q1 as ( select * from masking_test where key = '5') select * from q1 POSTHOOK: type: QUERY @@ -108,15 +104,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: --should not mask masking_test_subq - -explain +PREHOOK: query: explain with masking_test_subq as ( select * from masking_test where key = '5') select * 
from masking_test_subq PREHOOK: type: QUERY -POSTHOOK: query: --should not mask masking_test_subq - -explain +POSTHOOK: query: explain with masking_test_subq as ( select * from masking_test where key = '5') select * from masking_test_subq POSTHOOK: type: QUERY @@ -152,15 +144,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: --should mask masking_test_subq - -explain +PREHOOK: query: explain with q1 as ( select * from masking_test where key = '5') select * from masking_test_subq PREHOOK: type: QUERY -POSTHOOK: query: --should mask masking_test_subq - -explain +POSTHOOK: query: explain with q1 as ( select * from masking_test where key = '5') select * from masking_test_subq POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out b/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out index 698c797..9d05dbe 100644 --- a/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out +++ b/ql/src/test/results/clientpositive/masking_disablecbo_4.q.out @@ -64,15 +64,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: --should mask masking_test - -explain +PREHOOK: query: explain with q1 as ( select * from masking_test where key = '5') select * from q1 PREHOOK: type: QUERY -POSTHOOK: query: --should mask masking_test - -explain +POSTHOOK: query: explain with q1 as ( select * from masking_test where key = '5') select * from q1 POSTHOOK: type: QUERY @@ -108,15 +104,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: --should not mask masking_test_subq - -explain +PREHOOK: query: explain with masking_test_subq as ( select * from masking_test where key = '5') select * from masking_test_subq PREHOOK: type: QUERY -POSTHOOK: query: --should not mask masking_test_subq - -explain +POSTHOOK: query: explain with masking_test_subq as ( select * from masking_test where key = '5') select * from masking_test_subq POSTHOOK: type: QUERY @@ -152,15 +144,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: 
--should mask masking_test_subq - -explain +PREHOOK: query: explain with q1 as ( select * from masking_test where key = '5') select * from masking_test_subq PREHOOK: type: QUERY -POSTHOOK: query: --should mask masking_test_subq - -explain +POSTHOOK: query: explain with q1 as ( select * from masking_test where key = '5') select * from masking_test_subq POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/merge1.q.out b/ql/src/test/results/clientpositive/merge1.q.out index 94089fc..2487bf7 100644 --- a/ql/src/test/results/clientpositive/merge1.q.out +++ b/ql/src/test/results/clientpositive/merge1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table dest1(key int, val int) +PREHOOK: query: create table dest1(key int, val int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table dest1(key int, val int) +POSTHOOK: query: create table dest1(key int, val int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/merge2.q.out b/ql/src/test/results/clientpositive/merge2.q.out index a3a0e8e..a8b4bd5 100644 --- a/ql/src/test/results/clientpositive/merge2.q.out +++ b/ql/src/test/results/clientpositive/merge2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table test1(key int, val int) +PREHOOK: query: create table test1(key int, val int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table test1(key int, val int) +POSTHOOK: query: create table test1(key int, val int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test1 diff --git a/ql/src/test/results/clientpositive/merge3.q.out b/ql/src/test/results/clientpositive/merge3.q.out index 7f879db..2509846 100644 --- 
a/ql/src/test/results/clientpositive/merge3.q.out +++ b/ql/src/test/results/clientpositive/merge3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table merge_src as +PREHOOK: query: create table merge_src as select key, value from srcpart where ds is not null PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@srcpart @@ -10,9 +8,7 @@ PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 PREHOOK: Output: database:default PREHOOK: Output: default@merge_src -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table merge_src as +POSTHOOK: query: create table merge_src as select key, value from srcpart where ds is not null POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@srcpart diff --git a/ql/src/test/results/clientpositive/merge4.q.out b/ql/src/test/results/clientpositive/merge4.q.out index 08e4455..182c6a8 100644 --- a/ql/src/test/results/clientpositive/merge4.q.out +++ b/ql/src/test/results/clientpositive/merge4.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table nzhang_part like srcpart +PREHOOK: query: create table nzhang_part like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_part -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table nzhang_part like srcpart +POSTHOOK: query: create table nzhang_part like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_part diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out index 07102b1..a777fe0 100644 --- a/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out +++ b/ql/src/test/results/clientpositive/merge_dynamic_partition.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table srcpart_merge_dp like srcpart +PREHOOK: query: create table srcpart_merge_dp like srcpart 
PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_merge_dp -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table srcpart_merge_dp like srcpart +POSTHOOK: query: create table srcpart_merge_dp like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_merge_dp diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out index 86978f3..055e07a 100644 --- a/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out +++ b/ql/src/test/results/clientpositive/merge_dynamic_partition3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table srcpart_merge_dp like srcpart +PREHOOK: query: create table srcpart_merge_dp like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_merge_dp -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table srcpart_merge_dp like srcpart +POSTHOOK: query: create table srcpart_merge_dp like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_merge_dp diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out index ca168c8..cbeaf42 100644 --- a/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out +++ b/ql/src/test/results/clientpositive/merge_dynamic_partition4.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- this test verifies that the block merge task that can follow a query to generate dynamic --- partitions does not produce incorrect results by dropping partitions - -create table srcpart_merge_dp like srcpart +PREHOOK: query: create table srcpart_merge_dp like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_merge_dp -POSTHOOK: query: -- this test verifies that the block merge task that can 
follow a query to generate dynamic --- partitions does not produce incorrect results by dropping partitions - -create table srcpart_merge_dp like srcpart +POSTHOOK: query: create table srcpart_merge_dp like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_merge_dp diff --git a/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out b/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out index 38e1ad8..5a562f4 100644 --- a/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out +++ b/ql/src/test/results/clientpositive/merge_dynamic_partition5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- this is to test the case where some dynamic partitions are merged and some are moved - -create table srcpart_merge_dp like srcpart +PREHOOK: query: create table srcpart_merge_dp like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpart_merge_dp -POSTHOOK: query: -- this is to test the case where some dynamic partitions are merged and some are moved - -create table srcpart_merge_dp like srcpart +POSTHOOK: query: create table srcpart_merge_dp like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpart_merge_dp diff --git a/ql/src/test/results/clientpositive/mergejoin.q.out b/ql/src/test/results/clientpositive/mergejoin.q.out index 97891c2..33f8740 100644 --- a/ql/src/test/results/clientpositive/mergejoin.q.out +++ b/ql/src/test/results/clientpositive/mergejoin.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select * from src a join src1 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select * from src a join src1 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/mergejoins.q.out 
b/ql/src/test/results/clientpositive/mergejoins.q.out index aad2cbb..1023f61 100644 --- a/ql/src/test/results/clientpositive/mergejoins.q.out +++ b/ql/src/test/results/clientpositive/mergejoins.q.out @@ -184,11 +184,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: --HIVE-3070 filter on outer join condition removed while merging join tree -explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10 +PREHOOK: query: explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10 PREHOOK: type: QUERY -POSTHOOK: query: --HIVE-3070 filter on outer join condition removed while merging join tree -explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10 +POSTHOOK: query: explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/mergejoins_mixed.q.out b/ql/src/test/results/clientpositive/mergejoins_mixed.q.out index b5719ab..8f68cae 100644 --- a/ql/src/test/results/clientpositive/mergejoins_mixed.q.out +++ b/ql/src/test/results/clientpositive/mergejoins_mixed.q.out @@ -1,21 +1,15 @@ -PREHOOK: query: -- HIVE-3464 - -create table a (key string, value string) +PREHOOK: query: create table a (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@a -POSTHOOK: query: -- HIVE-3464 - -create table a (key string, value string) +POSTHOOK: query: create table a (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@a -PREHOOK: query: -- (a-b-c-d) -explain +PREHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key) PREHOOK: type: QUERY -POSTHOOK: query: -- (a-b-c-d) -explain +POSTHOOK: query: 
explain select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -417,12 +411,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- ((a-b-d)-c) (reordered) -explain +PREHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key) PREHOOK: type: QUERY -POSTHOOK: query: -- ((a-b-d)-c) (reordered) -explain +POSTHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -806,12 +798,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- (((a-b)-c)-d) -explain +PREHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key) PREHOOK: type: QUERY -POSTHOOK: query: -- (((a-b)-c)-d) -explain +POSTHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1404,12 +1394,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- ((a-b)-c-d) -explain +PREHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (c.key=d.key) PREHOOK: type: QUERY -POSTHOOK: query: -- ((a-b)-c-d) -explain +POSTHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (c.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/mi.q.out b/ql/src/test/results/clientpositive/mi.q.out index 35e054a..585a7be 100644 --- a/ql/src/test/results/clientpositive/mi.q.out +++ b/ql/src/test/results/clientpositive/mi.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table nzhang_t1 like 
srcpart +PREHOOK: query: create table nzhang_t1 like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_t1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table nzhang_t1 like srcpart +POSTHOOK: query: create table nzhang_t1 like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_t1 diff --git a/ql/src/test/results/clientpositive/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/multi_insert_gby.q.out index 190f430..cb97e58 100644 --- a/ql/src/test/results/clientpositive/multi_insert_gby.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_gby.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables -create table e1 (key string, count int) +PREHOOK: query: create table e1 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@e1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables -create table e1 (key string, count int) +POSTHOOK: query: create table e1 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 diff --git a/ql/src/test/results/clientpositive/multi_insert_gby2.q.out b/ql/src/test/results/clientpositive/multi_insert_gby2.q.out index f4baf04..476dfa7 100644 --- a/ql/src/test/results/clientpositive/multi_insert_gby2.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_gby2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: --HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables -create table e1 (count int) +PREHOOK: query: create table e1 (count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@e1 -POSTHOOK: query: --HIVE-3699 Multiple insert 
overwrite into multiple tables query stores same results in all tables -create table e1 (count int) +POSTHOOK: query: create table e1 (count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 diff --git a/ql/src/test/results/clientpositive/multi_insert_gby3.q.out b/ql/src/test/results/clientpositive/multi_insert_gby3.q.out index 6ee003b..b25ffcd 100644 --- a/ql/src/test/results/clientpositive/multi_insert_gby3.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_gby3.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -create table e1 (key string, keyD double) +PREHOOK: query: create table e1 (key string, keyD double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@e1 -POSTHOOK: query: -- SORT_QUERY_RESULTS -create table e1 (key string, keyD double) +POSTHOOK: query: create table e1 (key string, keyD double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 diff --git a/ql/src/test/results/clientpositive/multi_insert_mixed.q.out b/ql/src/test/results/clientpositive/multi_insert_mixed.q.out index aaf9346..eb81b50 100644 --- a/ql/src/test/results/clientpositive/multi_insert_mixed.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_mixed.q.out @@ -22,19 +22,13 @@ POSTHOOK: query: create table src_multi3 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_multi3 -PREHOOK: query: -- Testing the case where a map work contains both shuffling (ReduceSinkOperator) --- and inserting to output table (FileSinkOperator). 
- -explain +PREHOOK: query: explain from src insert overwrite table src_multi1 select key, count(1) group by key order by key insert overwrite table src_multi2 select value, count(1) group by value order by value insert overwrite table src_multi3 select * where key < 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Testing the case where a map work contains both shuffling (ReduceSinkOperator) --- and inserting to output table (FileSinkOperator). - -explain +POSTHOOK: query: explain from src insert overwrite table src_multi1 select key, count(1) group by key order by key insert overwrite table src_multi2 select value, count(1) group by value order by value diff --git a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out index c302dea..af0ef54 100644 --- a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table src_multi1 like src +PREHOOK: query: create table src_multi1 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_multi1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table src_multi1 like src +POSTHOOK: query: create table src_multi1 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_multi1 diff --git a/ql/src/test/results/clientpositive/multi_join_union.q.out b/ql/src/test/results/clientpositive/multi_join_union.q.out index b361a1a..9395141 100644 --- a/ql/src/test/results/clientpositive/multi_join_union.q.out +++ b/ql/src/test/results/clientpositive/multi_join_union.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src11 as SELECT * FROM src +PREHOOK: query: CREATE TABLE src11 as SELECT * FROM src PREHOOK: type: 
CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src11 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src11 as SELECT * FROM src +POSTHOOK: query: CREATE TABLE src11 as SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/newline.q.out b/ql/src/test/results/clientpositive/newline.q.out index 305d1e8..132946e 100644 --- a/ql/src/test/results/clientpositive/newline.q.out +++ b/ql/src/test/results/clientpositive/newline.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table tmp_tmp(key string, value string) stored as rcfile +PREHOOK: query: create table tmp_tmp(key string, value string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_tmp -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table tmp_tmp(key string, value string) stored as rcfile +POSTHOOK: query: create table tmp_tmp(key string, value string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp_tmp diff --git a/ql/src/test/results/clientpositive/nomore_ambiguous_table_col.q.out b/ql/src/test/results/clientpositive/nomore_ambiguous_table_col.q.out index 7a353b1..be32a09 100644 --- a/ql/src/test/results/clientpositive/nomore_ambiguous_table_col.q.out +++ b/ql/src/test/results/clientpositive/nomore_ambiguous_table_col.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- was negative/ambiguous_table_col.q - -drop table ambiguous +PREHOOK: query: drop table ambiguous PREHOOK: type: DROPTABLE -POSTHOOK: query: -- was negative/ambiguous_table_col.q - -drop table ambiguous +POSTHOOK: query: drop table ambiguous POSTHOOK: type: DROPTABLE PREHOOK: query: create table ambiguous (key string, value string) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out 
b/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out index b67dc89..0cd623d 100644 --- a/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out +++ b/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- negative, references twice for result of funcion -explain select nkey, nkey + 1 from (select key + 1 as nkey, value from src) a +PREHOOK: query: explain select nkey, nkey + 1 from (select key + 1 as nkey, value from src) a PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- negative, references twice for result of funcion -explain select nkey, nkey + 1 from (select key + 1 as nkey, value from src) a +POSTHOOK: query: explain select nkey, nkey + 1 from (select key + 1 as nkey, value from src) a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -38,9 +32,7 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[10][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- This test query is introduced for HIVE-4968. --- First, we do not convert the join to MapJoin. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count FROM (SELECT * @@ -50,9 +42,7 @@ FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count FROM src1) tmp3 ) tmp4 PREHOOK: type: QUERY -POSTHOOK: query: -- This test query is introduced for HIVE-4968. --- First, we do not convert the join to MapJoin. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count FROM (SELECT * @@ -189,8 +179,7 @@ POSTHOOK: Input: default@src1 66 val_66 25 98 val_98 25 Warning: Map Join MAPJOIN[15][bigTable=?] in task 'Stage-3:MAPRED' is a cross product -PREHOOK: query: -- Then, we convert the join to MapJoin. 
-EXPLAIN +PREHOOK: query: EXPLAIN SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count FROM (SELECT * @@ -200,8 +189,7 @@ FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count FROM src1) tmp3 ) tmp4 PREHOOK: type: QUERY -POSTHOOK: query: -- Then, we convert the join to MapJoin. -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT tmp4.key as key, tmp4.value as value, tmp4.count as count FROM (SELECT tmp2.key as key, tmp2.value as value, tmp3.count as count FROM (SELECT * diff --git a/ql/src/test/results/clientpositive/nonmr_fetch.q.out b/ql/src/test/results/clientpositive/nonmr_fetch.q.out index c2bf7da..5607089 100644 --- a/ql/src/test/results/clientpositive/nonmr_fetch.q.out +++ b/ql/src/test/results/clientpositive/nonmr_fetch.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- backward compatible (minimal) -explain select * from src limit 10 +PREHOOK: query: explain select * from src limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- backward compatible (minimal) -explain select * from src limit 10 +POSTHOOK: query: explain select * from src limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -128,11 +126,9 @@ POSTHOOK: Input: default@src 278 98 484 -PREHOOK: query: -- negative, filter on non-partition column -explain select * from srcpart where key > 100 limit 10 +PREHOOK: query: explain select * from srcpart where key > 100 limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- negative, filter on non-partition column -explain select * from srcpart where key > 100 limit 10 +POSTHOOK: query: explain select * from srcpart where key > 100 limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -183,11 +179,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 265 val_265 2008-04-08 11 193 val_193 2008-04-08 11 401 val_401 2008-04-08 11 -PREHOOK: query: -- negative, table sampling -explain select * from src TABLESAMPLE (0.25 PERCENT) limit 
10 +PREHOOK: query: explain select * from src TABLESAMPLE (0.25 PERCENT) limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- negative, table sampling -explain select * from src TABLESAMPLE (0.25 PERCENT) limit 10 +POSTHOOK: query: explain select * from src TABLESAMPLE (0.25 PERCENT) limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -239,11 +233,9 @@ POSTHOOK: Input: default@src 278 val_278 98 val_98 484 val_484 -PREHOOK: query: -- backward compatible (more) -explain select * from src limit 10 +PREHOOK: query: explain select * from src limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- backward compatible (more) -explain select * from src limit 10 +POSTHOOK: query: explain select * from src limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -327,11 +319,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 278 val_278 2008-04-08 11 98 val_98 2008-04-08 11 484 val_484 2008-04-08 11 -PREHOOK: query: -- select expression -explain select cast(key as int) * 10, upper(value) from src limit 10 +PREHOOK: query: explain select cast(key as int) * 10, upper(value) from src limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- select expression -explain select cast(key as int) * 10, upper(value) from src limit 10 +POSTHOOK: query: explain select cast(key as int) * 10, upper(value) from src limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -371,11 +361,9 @@ POSTHOOK: Input: default@src 2780 VAL_278 980 VAL_98 4840 VAL_484 -PREHOOK: query: -- filter on non-partition column -explain select key from src where key < 100 limit 10 +PREHOOK: query: explain select key from src where key < 100 limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- filter on non-partition column -explain select key from src where key < 100 limit 10 +POSTHOOK: query: explain select key from src where key < 100 limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -418,11 +406,9 @@ POSTHOOK: Input: 
default@src 17 0 57 -PREHOOK: query: -- select expr for partitioned table -explain select key from srcpart where ds='2008-04-08' AND hr='11' limit 10 +PREHOOK: query: explain select key from srcpart where ds='2008-04-08' AND hr='11' limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- select expr for partitioned table -explain select key from srcpart where ds='2008-04-08' AND hr='11' limit 10 +POSTHOOK: query: explain select key from srcpart where ds='2008-04-08' AND hr='11' limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -464,11 +450,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 278 98 484 -PREHOOK: query: -- virtual columns -explain select *, BLOCK__OFFSET__INSIDE__FILE from src where key < 10 limit 10 +PREHOOK: query: explain select *, BLOCK__OFFSET__INSIDE__FILE from src where key < 10 limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- virtual columns -explain select *, BLOCK__OFFSET__INSIDE__FILE from src where key < 10 limit 10 +POSTHOOK: query: explain select *, BLOCK__OFFSET__INSIDE__FILE from src where key < 10 limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -511,11 +495,9 @@ POSTHOOK: Input: default@src 17 val_17 910 0 val_0 968 57 val_57 1024 -PREHOOK: query: -- virtual columns on partitioned table -explain select *, BLOCK__OFFSET__INSIDE__FILE from srcpart where key < 10 limit 30 +PREHOOK: query: explain select *, BLOCK__OFFSET__INSIDE__FILE from srcpart where key < 10 limit 30 PREHOOK: type: QUERY -POSTHOOK: query: -- virtual columns on partitioned table -explain select *, BLOCK__OFFSET__INSIDE__FILE from srcpart where key < 10 limit 30 +POSTHOOK: query: explain select *, BLOCK__OFFSET__INSIDE__FILE from srcpart where key < 10 limit 30 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -586,11 +568,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 2 val_2 2008-04-09 11 4004 5 val_5 2008-04-09 11 4540 9 val_9 2008-04-09 11 5398 -PREHOOK: query: -- bucket 
sampling -explain select *, BLOCK__OFFSET__INSIDE__FILE from src TABLESAMPLE (BUCKET 1 OUT OF 40 ON key) +PREHOOK: query: explain select *, BLOCK__OFFSET__INSIDE__FILE from src TABLESAMPLE (BUCKET 1 OUT OF 40 ON key) PREHOOK: type: QUERY -POSTHOOK: query: -- bucket sampling -explain select *, BLOCK__OFFSET__INSIDE__FILE from src TABLESAMPLE (BUCKET 1 OUT OF 40 ON key) +POSTHOOK: query: explain select *, BLOCK__OFFSET__INSIDE__FILE from src TABLESAMPLE (BUCKET 1 OUT OF 40 ON key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -695,11 +675,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 77 val_77 2008-04-09 12 2622 187 val_187 2008-04-09 12 4516 448 val_448 2008-04-09 12 5636 -PREHOOK: query: -- split sampling -explain select * from src TABLESAMPLE (0.25 PERCENT) +PREHOOK: query: explain select * from src TABLESAMPLE (0.25 PERCENT) PREHOOK: type: QUERY -POSTHOOK: query: -- split sampling -explain select * from src TABLESAMPLE (0.25 PERCENT) +POSTHOOK: query: explain select * from src TABLESAMPLE (0.25 PERCENT) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -773,12 +751,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 86 val_86 2008-04-09 11 12 238 val_238 2008-04-09 12 0 86 val_86 2008-04-09 12 12 -PREHOOK: query: -- sub query -explain +PREHOOK: query: explain select key, value from (select value key,key value from src where key > 200) a where value < 250 limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- sub query -explain +POSTHOOK: query: explain select key, value from (select value key,key value from src where key > 200) a where value < 250 limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -832,12 +808,10 @@ val_221 221 val_241 241 val_230 230 val_217 217 -PREHOOK: query: -- lateral view -explain +PREHOOK: query: explain select key,X from srcpart lateral view explode(array(key,value)) L as x where (ds='2008-04-08' AND hr='11') limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- lateral view 
-explain +POSTHOOK: query: explain select key,X from srcpart lateral view explode(array(key,value)) L as x where (ds='2008-04-08' AND hr='11') limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -917,11 +891,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 98 val_98 484 484 484 val_484 -PREHOOK: query: -- non deterministic func -explain select key, value, BLOCK__OFFSET__INSIDE__FILE from srcpart where ds="2008-04-09" AND rand() > 1 +PREHOOK: query: explain select key, value, BLOCK__OFFSET__INSIDE__FILE from srcpart where ds="2008-04-09" AND rand() > 1 PREHOOK: type: QUERY -POSTHOOK: query: -- non deterministic func -explain select key, value, BLOCK__OFFSET__INSIDE__FILE from srcpart where ds="2008-04-09" AND rand() > 1 +POSTHOOK: query: explain select key, value, BLOCK__OFFSET__INSIDE__FILE from srcpart where ds="2008-04-09" AND rand() > 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -955,11 +927,9 @@ POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### -PREHOOK: query: -- negative, groupby -explain select key, count(value) from src group by key +PREHOOK: query: explain select key, count(value) from src group by key PREHOOK: type: QUERY -POSTHOOK: query: -- negative, groupby -explain select key, count(value) from src group by key +POSTHOOK: query: explain select key, count(value) from src group by key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1009,11 +979,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- negative, distinct -explain select distinct key, value from src +PREHOOK: query: explain select distinct key, value from src PREHOOK: type: QUERY -POSTHOOK: query: -- negative, distinct -explain select distinct key, value from src +POSTHOOK: query: explain select distinct key, value from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1060,11 
+1028,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- negative, CTAS -explain create table srcx as select distinct key, value from src +PREHOOK: query: explain create table srcx as select distinct key, value from src PREHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: query: -- negative, CTAS -explain create table srcx as select distinct key, value from src +POSTHOOK: query: explain create table srcx as select distinct key, value from src POSTHOOK: type: CREATETABLE_AS_SELECT STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1126,11 +1092,9 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: -- negative, analyze -explain analyze table src compute statistics +PREHOOK: query: explain analyze table src compute statistics PREHOOK: type: QUERY -POSTHOOK: query: -- negative, analyze -explain analyze table src compute statistics +POSTHOOK: query: explain analyze table src compute statistics POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -1147,11 +1111,9 @@ STAGE PLANS: Stage: Stage-1 Stats-Aggr Operator -PREHOOK: query: -- negative, join -explain select * from src join src src2 on src.key=src2.key +PREHOOK: query: explain select * from src join src src2 on src.key=src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- negative, join -explain select * from src join src src2 on src.key=src2.key +POSTHOOK: query: explain select * from src join src src2 on src.key=src2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/nonmr_fetch_threshold.q.out b/ql/src/test/results/clientpositive/nonmr_fetch_threshold.q.out index 216bf72..6c36a35 100644 --- a/ql/src/test/results/clientpositive/nonmr_fetch_threshold.q.out +++ b/ql/src/test/results/clientpositive/nonmr_fetch_threshold.q.out @@ -94,11 +94,9 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- Scans without limit (should be Fetch task now) 
-explain select concat(key, value) from src +PREHOOK: query: explain select concat(key, value) from src PREHOOK: type: QUERY -POSTHOOK: query: -- Scans without limit (should be Fetch task now) -explain select concat(key, value) from src +POSTHOOK: query: explain select concat(key, value) from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -117,11 +115,9 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- from HIVE-7397, limit + partition pruning filter -explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10 +PREHOOK: query: explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- from HIVE-7397, limit + partition pruning filter -explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10 +POSTHOOK: query: explain select * from srcpart where ds='2008-04-08' AND hr='11' limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -167,11 +163,9 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 100 Basic stats: COMPLETE Column stats: NONE ListSink -PREHOOK: query: -- Scans without limit (should not be Fetch task now) -explain select concat(key, value) from src +PREHOOK: query: explain select concat(key, value) from src PREHOOK: type: QUERY -POSTHOOK: query: -- Scans without limit (should not be Fetch task now) -explain select concat(key, value) from src +POSTHOOK: query: explain select concat(key, value) from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -202,11 +196,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Simple Scans without limit (will be Fetch task now) -explain select key, value from src +PREHOOK: query: explain select key, value from src PREHOOK: type: QUERY -POSTHOOK: query: -- Simple Scans without limit (will be Fetch task now) -explain select key, value from src +POSTHOOK: query: explain select key, 
value from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/nullformat.q.out b/ql/src/test/results/clientpositive/nullformat.q.out index af91470..b5821b7 100644 --- a/ql/src/test/results/clientpositive/nullformat.q.out +++ b/ql/src/test/results/clientpositive/nullformat.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- base table with null data -DROP TABLE IF EXISTS base_tab +PREHOOK: query: DROP TABLE IF EXISTS base_tab PREHOOK: type: DROPTABLE -POSTHOOK: query: -- base table with null data -DROP TABLE IF EXISTS base_tab +POSTHOOK: query: DROP TABLE IF EXISTS base_tab POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE @@ -32,11 +30,9 @@ c string d string #### A masked pattern was here #### -PREHOOK: query: -- table with non-default null format -DROP TABLE IF EXISTS null_tab1 +PREHOOK: query: DROP TABLE IF EXISTS null_tab1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- table with non-default null format -DROP TABLE IF EXISTS null_tab1 +POSTHOOK: query: DROP TABLE IF EXISTS null_tab1 POSTHOOK: type: DROPTABLE PREHOOK: query: EXPLAIN CREATE TABLE null_tab1(a STRING, b STRING) ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' PREHOOK: type: CREATETABLE @@ -94,13 +90,11 @@ LOCATION #### A masked pattern was here #### TBLPROPERTIES ( #### A masked pattern was here #### -PREHOOK: query: -- load null data from another table and verify that the null is stored in the expected format -INSERT OVERWRITE TABLE null_tab1 SELECT a,b FROM base_tab +PREHOOK: query: INSERT OVERWRITE TABLE null_tab1 SELECT a,b FROM base_tab PREHOOK: type: QUERY PREHOOK: Input: default@base_tab PREHOOK: Output: default@null_tab1 -POSTHOOK: query: -- load null data from another table and verify that the null is stored in the expected format -INSERT OVERWRITE TABLE null_tab1 SELECT a,b FROM base_tab +POSTHOOK: query: INSERT OVERWRITE TABLE null_tab1 
SELECT a,b FROM base_tab POSTHOOK: type: QUERY POSTHOOK: Input: default@base_tab POSTHOOK: Output: default@null_tab1 @@ -134,13 +128,11 @@ NULL NULL 1.0 NULL 1.0 1 1.0 1 -PREHOOK: query: -- alter the null format and verify that the old null format is no longer in effect -ALTER TABLE null_tab1 SET SERDEPROPERTIES ( 'serialization.null.format'='foo') +PREHOOK: query: ALTER TABLE null_tab1 SET SERDEPROPERTIES ( 'serialization.null.format'='foo') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@null_tab1 PREHOOK: Output: default@null_tab1 -POSTHOOK: query: -- alter the null format and verify that the old null format is no longer in effect -ALTER TABLE null_tab1 SET SERDEPROPERTIES ( 'serialization.null.format'='foo') +POSTHOOK: query: ALTER TABLE null_tab1 SET SERDEPROPERTIES ( 'serialization.null.format'='foo') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: default@null_tab1 POSTHOOK: Output: default@null_tab1 diff --git a/ql/src/test/results/clientpositive/nullformatCTAS.q.out b/ql/src/test/results/clientpositive/nullformatCTAS.q.out index ea51a56..cda0965 100644 --- a/ql/src/test/results/clientpositive/nullformatCTAS.q.out +++ b/ql/src/test/results/clientpositive/nullformatCTAS.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- base table with null data -DROP TABLE IF EXISTS base_tab +PREHOOK: query: DROP TABLE IF EXISTS base_tab PREHOOK: type: DROPTABLE -POSTHOOK: query: -- base table with null data -DROP TABLE IF EXISTS base_tab +POSTHOOK: query: DROP TABLE IF EXISTS base_tab POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE @@ -32,11 +30,9 @@ c string d string #### A masked pattern was here #### -PREHOOK: query: -- table with non-default null format -DROP TABLE IF EXISTS null_tab3 +PREHOOK: query: DROP TABLE IF EXISTS null_tab3 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- table with non-default null format -DROP TABLE IF EXISTS null_tab3 
+POSTHOOK: query: DROP TABLE IF EXISTS null_tab3 POSTHOOK: type: DROPTABLE PREHOOK: query: EXPLAIN CREATE TABLE null_tab3 ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' AS SELECT a, b FROM base_tab @@ -205,13 +201,11 @@ NULL NULL 1.0 NULL 1.0 1 1.0 1 -PREHOOK: query: -- alter the null format and verify that the old null format is no longer in effect -ALTER TABLE null_tab3 SET SERDEPROPERTIES ( 'serialization.null.format'='foo') +PREHOOK: query: ALTER TABLE null_tab3 SET SERDEPROPERTIES ( 'serialization.null.format'='foo') PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@null_tab3 PREHOOK: Output: default@null_tab3 -POSTHOOK: query: -- alter the null format and verify that the old null format is no longer in effect -ALTER TABLE null_tab3 SET SERDEPROPERTIES ( 'serialization.null.format'='foo') +POSTHOOK: query: ALTER TABLE null_tab3 SET SERDEPROPERTIES ( 'serialization.null.format'='foo') POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: default@null_tab3 POSTHOOK: Output: default@null_tab3 diff --git a/ql/src/test/results/clientpositive/nullformatdir.q.out b/ql/src/test/results/clientpositive/nullformatdir.q.out index 14a79b2..5e6c986 100644 --- a/ql/src/test/results/clientpositive/nullformatdir.q.out +++ b/ql/src/test/results/clientpositive/nullformatdir.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- base table with null data -DROP TABLE IF EXISTS base_tab +PREHOOK: query: DROP TABLE IF EXISTS base_tab PREHOOK: type: DROPTABLE -POSTHOOK: query: -- base table with null data -DROP TABLE IF EXISTS base_tab +POSTHOOK: query: DROP TABLE IF EXISTS base_tab POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE base_tab(a STRING, b STRING, c STRING, d STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE @@ -50,11 +48,9 @@ fooNullfooNull 1.0fooNull 1.01 1.01 -PREHOOK: query: -- load the exported data back into a table with same null format and verify null values -DROP TABLE IF EXISTS null_tab2 +PREHOOK: query: DROP TABLE IF EXISTS null_tab2 PREHOOK: 
type: DROPTABLE -POSTHOOK: query: -- load the exported data back into a table with same null format and verify null values -DROP TABLE IF EXISTS null_tab2 +POSTHOOK: query: DROP TABLE IF EXISTS null_tab2 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE null_tab2(a STRING, b STRING) ROW FORMAT DELIMITED NULL DEFINED AS 'fooNull' PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/orc_dictionary_threshold.q.out b/ql/src/test/results/clientpositive/orc_dictionary_threshold.q.out index a57243e..bde2b53 100644 --- a/ql/src/test/results/clientpositive/orc_dictionary_threshold.q.out +++ b/ql/src/test/results/clientpositive/orc_dictionary_threshold.q.out @@ -1,45 +1,31 @@ -PREHOOK: query: -- Set the threshold to -1 to guarantee dictionary encoding is turned off --- Tests that the data can be read back correctly when a string column is stored --- without dictionary encoding - -CREATE TABLE test_orc (key STRING) +PREHOOK: query: CREATE TABLE test_orc (key STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_orc -POSTHOOK: query: -- Set the threshold to -1 to guarantee dictionary encoding is turned off --- Tests that the data can be read back correctly when a string column is stored --- without dictionary encoding - -CREATE TABLE test_orc (key STRING) +POSTHOOK: query: CREATE TABLE test_orc (key STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_orc -PREHOOK: query: -- should be single split -INSERT OVERWRITE TABLE test_orc SELECT key FROM src TABLESAMPLE (10 ROWS) 
+PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT key FROM src TABLESAMPLE (10 ROWS) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_orc -POSTHOOK: query: -- should be single split -INSERT OVERWRITE TABLE test_orc SELECT key FROM src TABLESAMPLE (10 ROWS) +POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT key FROM src TABLESAMPLE (10 ROWS) POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@test_orc POSTHOOK: Lineage: test_orc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- Test reading the column back - -SELECT * FROM test_orc +PREHOOK: query: SELECT * FROM test_orc PREHOOK: type: QUERY PREHOOK: Input: default@test_orc #### A masked pattern was here #### -POSTHOOK: query: -- Test reading the column back - -SELECT * FROM test_orc +POSTHOOK: query: SELECT * FROM test_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@test_orc #### A masked pattern was here #### @@ -79,15 +65,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1kv2.cogroup.txt' POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@src_thousand -PREHOOK: query: -- Add data to the table in such a way that alternate stripes encode the column --- differently. Setting orc.stripe.size = 1 guarantees the stripes each have --- 5000 rows. The first stripe will have 5 * 630 distinct rows and thus be --- above the cutoff of 50% and will be direct encoded. The second stripe --- will have 5 * 1 distinct rows and thus be under the cutoff and will be --- dictionary encoded. The final stripe will have 630 out of 1000 and be --- direct encoded. 
- -INSERT OVERWRITE TABLE test_orc +PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT key FROM ( SELECT CONCAT("a", key) AS key FROM src_thousand UNION ALL @@ -114,15 +92,7 @@ SELECT CONCAT("k", key) AS key FROM src_thousand PREHOOK: type: QUERY PREHOOK: Input: default@src_thousand PREHOOK: Output: default@test_orc -POSTHOOK: query: -- Add data to the table in such a way that alternate stripes encode the column --- differently. Setting orc.stripe.size = 1 guarantees the stripes each have --- 5000 rows. The first stripe will have 5 * 630 distinct rows and thus be --- above the cutoff of 50% and will be direct encoded. The second stripe --- will have 5 * 1 distinct rows and thus be under the cutoff and will be --- dictionary encoded. The final stripe will have 630 out of 1000 and be --- direct encoded. - -INSERT OVERWRITE TABLE test_orc +POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT key FROM ( SELECT CONCAT("a", key) AS key FROM src_thousand UNION ALL diff --git a/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out b/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out index e33d0f4..7e8347a 100644 --- a/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out +++ b/ql/src/test/results/clientpositive/orc_diff_part_cols.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE test_orc (key STRING) +PREHOOK: query: CREATE TABLE test_orc (key STRING) PARTITIONED BY (part STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' @@ -8,9 +6,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_orc -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE test_orc (key STRING) +POSTHOOK: query: CREATE TABLE test_orc (key STRING) PARTITIONED BY (part STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS 
INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' @@ -18,19 +14,11 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_orc -PREHOOK: query: -- Create a table with one column write to a partition, then add an additional column and write --- to another partition --- This can produce unexpected results with CombineHiveInputFormat - -INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows) +PREHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_orc@part=1 -POSTHOOK: query: -- Create a table with one column write to a partition, then add an additional column and write --- to another partition --- This can produce unexpected results with CombineHiveInputFormat - -INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows) +POSTHOOK: query: INSERT OVERWRITE TABLE test_orc PARTITION (part = '1') SELECT key FROM src tablesample (5 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@test_orc@part=1 diff --git a/ql/src/test/results/clientpositive/orc_diff_part_cols2.q.out b/ql/src/test/results/clientpositive/orc_diff_part_cols2.q.out index 91385c6..7900cb9 100644 --- a/ql/src/test/results/clientpositive/orc_diff_part_cols2.q.out +++ b/ql/src/test/results/clientpositive/orc_diff_part_cols2.q.out @@ -1,19 +1,9 @@ -PREHOOK: query: -- Create a table with one column, write to it, then add an additional column --- This can break reads - --- SORT_QUERY_RESULTS - -CREATE TABLE test_orc (key STRING) +PREHOOK: query: CREATE TABLE test_orc (key STRING) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_orc -POSTHOOK: query: -- Create a table with one column, write to it, then 
add an additional column --- This can break reads - --- SORT_QUERY_RESULTS - -CREATE TABLE test_orc (key STRING) +POSTHOOK: query: CREATE TABLE test_orc (key STRING) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/orc_empty_files.q.out b/ql/src/test/results/clientpositive/orc_empty_files.q.out index 2426ca3..78f2b5c 100644 --- a/ql/src/test/results/clientpositive/orc_empty_files.q.out +++ b/ql/src/test/results/clientpositive/orc_empty_files.q.out @@ -14,21 +14,11 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_orc -PREHOOK: query: -- Creates a table bucketed into 3 buckets, but only one contains data, specifically bucket 1, --- buckets 0 and 2 are empty, so this tests reading from and empty file followed by a file --- containing data and a file containing data followed by an empty file. --- This can produce unexpected results with CombineHiveInputFormat - -INSERT OVERWRITE TABLE test_orc SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one +PREHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_orc -POSTHOOK: query: -- Creates a table bucketed into 3 buckets, but only one contains data, specifically bucket 1, --- buckets 0 and 2 are empty, so this tests reading from and empty file followed by a file --- containing data and a file containing data followed by an empty file. 
--- This can produce unexpected results with CombineHiveInputFormat - -INSERT OVERWRITE TABLE test_orc SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one +POSTHOOK: query: INSERT OVERWRITE TABLE test_orc SELECT one, COUNT(*) FROM (SELECT 1 AS one FROM src) a GROUP BY one POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@test_orc diff --git a/ql/src/test/results/clientpositive/orc_empty_strings.q.out b/ql/src/test/results/clientpositive/orc_empty_strings.q.out index 9ab4e8d..862836d 100644 --- a/ql/src/test/results/clientpositive/orc_empty_strings.q.out +++ b/ql/src/test/results/clientpositive/orc_empty_strings.q.out @@ -1,15 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE test_orc (key STRING) +PREHOOK: query: CREATE TABLE test_orc (key STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_orc -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE test_orc (key STRING) +POSTHOOK: query: CREATE TABLE test_orc (key STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.orc.OrcSerde' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat' @@ -25,15 +21,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@test_orc POSTHOOK: Lineage: test_orc.key SIMPLE [] -PREHOOK: query: -- Test reading a column which is just empty strings - -SELECT * FROM test_orc +PREHOOK: query: SELECT * FROM test_orc PREHOOK: type: QUERY PREHOOK: Input: default@test_orc #### A masked pattern was here #### -POSTHOOK: query: -- Test reading a column which is just empty strings - -SELECT * FROM test_orc +POSTHOOK: query: SELECT * FROM test_orc POSTHOOK: type: QUERY POSTHOOK: Input: 
default@test_orc #### A masked pattern was here #### @@ -56,15 +48,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@test_orc POSTHOOK: Lineage: test_orc.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- Test reading a column which has some empty strings - -SELECT * FROM test_orc +PREHOOK: query: SELECT * FROM test_orc PREHOOK: type: QUERY PREHOOK: Input: default@test_orc #### A masked pattern was here #### -POSTHOOK: query: -- Test reading a column which has some empty strings - -SELECT * FROM test_orc +POSTHOOK: query: SELECT * FROM test_orc POSTHOOK: type: QUERY POSTHOOK: Input: default@test_orc #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/orc_ends_with_nulls.q.out b/ql/src/test/results/clientpositive/orc_ends_with_nulls.q.out index 5011a4a..84e5252 100644 --- a/ql/src/test/results/clientpositive/orc_ends_with_nulls.q.out +++ b/ql/src/test/results/clientpositive/orc_ends_with_nulls.q.out @@ -20,19 +20,11 @@ POSTHOOK: query: ALTER TABLE test_orc SET SERDEPROPERTIES ('orc.row.index.stride POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: default@test_orc POSTHOOK: Output: default@test_orc -PREHOOK: query: -- nulls.txt is a file containing a non-null string row followed by 1000 null string rows --- this produces the effect that the number of non-null rows between the last and second --- to last index stride are the same (there's only two index strides) - -CREATE TABLE src_null(a STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE src_null(a STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_null -POSTHOOK: query: -- nulls.txt is a file containing a non-null string row followed by 1000 null string rows --- this produces the effect that the number of non-null rows between the last and second --- to last index stride are the same (there's only two index strides) - 
-CREATE TABLE src_null(a STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE src_null(a STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_null diff --git a/ql/src/test/results/clientpositive/orc_merge1.q.out b/ql/src/test/results/clientpositive/orc_merge1.q.out index 3f047da..a83e85b 100644 --- a/ql/src/test/results/clientpositive/orc_merge1.q.out +++ b/ql/src/test/results/clientpositive/orc_merge1.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +POSTHOOK: query: DROP TABLE orcfile_merge1 POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE orcfile_merge1b PREHOOK: type: DROPTABLE @@ -44,14 +40,12 @@ POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orcfile_merge1c -PREHOOK: query: -- merge disabled -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- merge disabled -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -116,14 +110,12 @@ POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)sr POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 2 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge slow way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge slow way -EXPLAIN 
+POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -232,14 +224,12 @@ POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge fast way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge fast way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -338,8 +328,7 @@ POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t @@ -348,8 +337,7 @@ PREHOOK: Input: default@orcfile_merge1 PREHOOK: Input: default@orcfile_merge1@ds=1/part=0 PREHOOK: Input: default@orcfile_merge1@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t diff --git a/ql/src/test/results/clientpositive/orc_merge10.q.out b/ql/src/test/results/clientpositive/orc_merge10.q.out index 1d64ae5..8d4cb0d 100644 --- a/ql/src/test/results/clientpositive/orc_merge10.q.out +++ b/ql/src/test/results/clientpositive/orc_merge10.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- 
SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +POSTHOOK: query: DROP TABLE orcfile_merge1 POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE orcfile_merge1b PREHOOK: type: DROPTABLE @@ -44,14 +40,12 @@ POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orcfile_merge1c -PREHOOK: query: -- merge disabled -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- merge disabled -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -116,14 +110,12 @@ POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)sr POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 2 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge slow way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge slow way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -232,14 +224,12 @@ POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge fast way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE 
TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge fast way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -338,8 +328,7 @@ POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t @@ -348,8 +337,7 @@ PREHOOK: Input: default@orcfile_merge1 PREHOOK: Input: default@orcfile_merge1@ds=1/part=0 PREHOOK: Input: default@orcfile_merge1@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t @@ -404,11 +392,9 @@ POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=0 POSTHOOK: Input: default@orcfile_merge1b@ds=1/part=1 #### A masked pattern was here #### 500 -PREHOOK: query: -- concatenate -explain ALTER TABLE orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE +PREHOOK: query: explain ALTER TABLE orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE PREHOOK: type: ALTER_PARTITION_MERGE -POSTHOOK: query: -- concatenate -explain ALTER TABLE orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE +POSTHOOK: query: explain ALTER TABLE orcfile_merge1 PARTITION (ds='1', part='0') CONCATENATE POSTHOOK: type: ALTER_PARTITION_MERGE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -444,8 +430,7 @@ POSTHOOK: Input: default@orcfile_merge1 POSTHOOK: Output: default@orcfile_merge1@ds=1/part=0 Found 1 items #### A masked 
pattern was here #### -PREHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1c WHERE ds='1' ) t @@ -454,8 +439,7 @@ PREHOOK: Input: default@orcfile_merge1c PREHOOK: Input: default@orcfile_merge1c@ds=1/part=0 PREHOOK: Input: default@orcfile_merge1c@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1c WHERE ds='1' ) t diff --git a/ql/src/test/results/clientpositive/orc_merge11.q.out b/ql/src/test/results/clientpositive/orc_merge11.q.out index 5143a6a..a8ab854 100644 --- a/ql/src/test/results/clientpositive/orc_merge11.q.out +++ b/ql/src/test/results/clientpositive/orc_merge11.q.out @@ -253,8 +253,7 @@ ________________________________________________________________________________ -- END ORC FILE DUMP -- 2 foo 0.8 1 1969-12-31 16:00:00 -PREHOOK: query: -- concatenate -ALTER TABLE orcfile_merge1 CONCATENATE +PREHOOK: query: ALTER TABLE orcfile_merge1 CONCATENATE PREHOOK: type: ALTER_TABLE_MERGE PREHOOK: Input: default@orcfile_merge1 PREHOOK: Output: default@orcfile_merge1 @@ -265,8 +264,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@orc_split_elim #### A masked pattern was here #### 50000 -PREHOOK: query: -- will have double the number of rows -select count(*) from orcfile_merge1 +PREHOOK: query: select count(*) from orcfile_merge1 PREHOOK: type: QUERY PREHOOK: Input: default@orcfile_merge1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/orc_merge5.q.out b/ql/src/test/results/clientpositive/orc_merge5.q.out index 77c01c7..e845186 100644 --- a/ql/src/test/results/clientpositive/orc_merge5.q.out +++ b/ql/src/test/results/clientpositive/orc_merge5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_merge5 (userid bigint, string1 string, 
subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 @@ -26,11 +22,9 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@orc_merge5 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -87,13 +81,11 @@ POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchem POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, 
comment:null), ] -PREHOOK: query: -- 3 files total -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 3 files total -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b @@ -110,11 +102,9 @@ POSTHOOK: Input: default@orc_merge5b 13 bar 80.0 2 1969-12-31 16:00:05 2 foo 0.8 1 1969-12-31 16:00:00 5 eat 0.8 6 1969-12-31 16:00:20 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -205,13 +195,11 @@ POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchem POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: 
Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b @@ -295,13 +283,11 @@ POSTHOOK: query: alter table orc_merge5b concatenate POSTHOOK: type: ALTER_TABLE_MERGE POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b diff --git a/ql/src/test/results/clientpositive/orc_merge6.q.out b/ql/src/test/results/clientpositive/orc_merge6.q.out index 05f548c..5ece361 100644 --- a/ql/src/test/results/clientpositive/orc_merge6.q.out +++ b/ql/src/test/results/clientpositive/orc_merge6.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- orc file merge tests for static partitions -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- orc file merge tests for static partitions -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 
(userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 @@ -28,11 +22,9 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@orc_merge5 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -105,14 +97,12 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 3 files total -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: 
default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: -- 3 files total -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a @@ -157,11 +147,9 @@ POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24 2 foo 0.8 1 1969-12-31 16:00:00 2001 24 5 eat 0.8 6 1969-12-31 16:00:20 2000 24 5 eat 0.8 6 1969-12-31 16:00:20 2001 24 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -268,14 +256,12 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 1 file 
after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a @@ -440,14 +426,12 @@ POSTHOOK: query: alter table orc_merge5a partition(year="2001",hour=24) concaten POSTHOOK: type: ALTER_PARTITION_MERGE POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a diff --git a/ql/src/test/results/clientpositive/orc_merge9.q.out b/ql/src/test/results/clientpositive/orc_merge9.q.out index bdf0fd3..28e51b2 100644 --- a/ql/src/test/results/clientpositive/orc_merge9.q.out +++ b/ql/src/test/results/clientpositive/orc_merge9.q.out @@ -64,15 +64,11 @@ POSTHOOK: Input: default@ts_merge 50000 Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- 
incompatible merge test (stripe statistics missing) - -create table a_merge like alltypesorc +PREHOOK: query: create table a_merge like alltypesorc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@a_merge -POSTHOOK: query: -- incompatible merge test (stripe statistics missing) - -create table a_merge like alltypesorc +POSTHOOK: query: create table a_merge like alltypesorc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@a_merge diff --git a/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out index 3f047da..a83e85b 100644 --- a/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out +++ b/ql/src/test/results/clientpositive/orc_merge_diff_fs.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +POSTHOOK: query: DROP TABLE orcfile_merge1 POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE orcfile_merge1b PREHOOK: type: DROPTABLE @@ -44,14 +40,12 @@ POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orcfile_merge1c -PREHOOK: query: -- merge disabled -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- merge disabled -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -116,14 +110,12 @@ POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)sr POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 
2 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge slow way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge slow way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -232,14 +224,12 @@ POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge fast way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge fast way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -338,8 +328,7 @@ POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t @@ -348,8 +337,7 @@ PREHOOK: Input: default@orcfile_merge1 PREHOOK: Input: default@orcfile_merge1@ds=1/part=0 PREHOOK: Input: default@orcfile_merge1@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t 
diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out index 7a9c772..fcf1c68 100644 --- a/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/orc_merge_incompat1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 @@ -26,11 +22,9 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@orc_merge5 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -151,13 +145,11 @@ POSTHOOK: Lineage: 
orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchem POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 5 files total -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 5 files total -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b @@ -197,13 +189,11 @@ POSTHOOK: query: alter table orc_merge5b concatenate POSTHOOK: type: ALTER_TABLE_MERGE POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b diff --git a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out index b437e16..a27041f 100644 --- 
a/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out +++ b/ql/src/test/results/clientpositive/orc_merge_incompat2.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- orc merge file tests for dynamic partition case - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- orc merge file tests for dynamic partition case - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 diff --git a/ql/src/test/results/clientpositive/orc_ppd_boolean.q.out b/ql/src/test/results/clientpositive/orc_ppd_boolean.q.out index 0526341..b0b8a32 100644 --- a/ql/src/test/results/clientpositive/orc_ppd_boolean.q.out +++ b/ql/src/test/results/clientpositive/orc_ppd_boolean.q.out @@ -18,13 +18,11 @@ POSTHOOK: Lineage: newtypesorc.b EXPRESSION [] POSTHOOK: Lineage: newtypesorc.c EXPRESSION [] POSTHOOK: Lineage: newtypesorc.d EXPRESSION [] POSTHOOK: Lineage: newtypesorc.v EXPRESSION [] -PREHOOK: query: -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select sum(hash(*)) from newtypesorc where b=true +PREHOOK: query: select sum(hash(*)) from newtypesorc where b=true PREHOOK: type: QUERY PREHOOK: Input: default@newtypesorc #### A masked pattern was here #### -POSTHOOK: query: -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select sum(hash(*)) from newtypesorc where 
b=true +POSTHOOK: query: select sum(hash(*)) from newtypesorc where b=true POSTHOOK: type: QUERY POSTHOOK: Input: default@newtypesorc #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/orc_ppd_char.q.out b/ql/src/test/results/clientpositive/orc_ppd_char.q.out index 8af2c5a..13ef7b1 100644 --- a/ql/src/test/results/clientpositive/orc_ppd_char.q.out +++ b/ql/src/test/results/clientpositive/orc_ppd_char.q.out @@ -18,13 +18,11 @@ POSTHOOK: Lineage: newtypesorc.c EXPRESSION [] POSTHOOK: Lineage: newtypesorc.d EXPRESSION [] POSTHOOK: Lineage: newtypesorc.da EXPRESSION [] POSTHOOK: Lineage: newtypesorc.v EXPRESSION [] -PREHOOK: query: -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select sum(hash(*)) from newtypesorc where c="apple" +PREHOOK: query: select sum(hash(*)) from newtypesorc where c="apple" PREHOOK: type: QUERY PREHOOK: Input: default@newtypesorc #### A masked pattern was here #### -POSTHOOK: query: -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select sum(hash(*)) from newtypesorc where c="apple" +POSTHOOK: query: select sum(hash(*)) from newtypesorc where c="apple" POSTHOOK: type: QUERY POSTHOOK: Input: default@newtypesorc #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/orc_remove_cols.q.out b/ql/src/test/results/clientpositive/orc_remove_cols.q.out index b449b87..178a974 100644 --- a/ql/src/test/results/clientpositive/orc_remove_cols.q.out +++ b/ql/src/test/results/clientpositive/orc_remove_cols.q.out @@ -26,13 +26,11 @@ POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: default@orc_partitioned@ds=tomorrow POSTHOOK: Lineage: orc_partitioned PARTITION(ds=tomorrow).a SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] POSTHOOK: Lineage: orc_partitioned PARTITION(ds=tomorrow).b SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cstring1, type:string, comment:null), ] -PREHOOK: 
query: -- Use the old change the SERDE trick to avoid ORC DDL checks... and remove a column on the end. -ALTER TABLE orc_partitioned SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +PREHOOK: query: ALTER TABLE orc_partitioned SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: default@orc_partitioned PREHOOK: Output: default@orc_partitioned -POSTHOOK: query: -- Use the old change the SERDE trick to avoid ORC DDL checks... and remove a column on the end. -ALTER TABLE orc_partitioned SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' +POSTHOOK: query: ALTER TABLE orc_partitioned SET SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: default@orc_partitioned POSTHOOK: Output: default@orc_partitioned diff --git a/ql/src/test/results/clientpositive/orc_schema_evolution.q.out b/ql/src/test/results/clientpositive/orc_schema_evolution.q.out index 9707b14..b536a75 100644 --- a/ql/src/test/results/clientpositive/orc_schema_evolution.q.out +++ b/ql/src/test/results/clientpositive/orc_schema_evolution.q.out @@ -14,13 +14,11 @@ POSTHOOK: query: create table src_orc2 (key smallint, val string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_orc2 -PREHOOK: query: -- integer type widening -insert overwrite table src_orc select * from src +PREHOOK: query: insert overwrite table src_orc select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@src_orc -POSTHOOK: query: -- integer type widening -insert overwrite table src_orc select * from src +POSTHOOK: query: insert overwrite table src_orc select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@src_orc @@ -86,13 +84,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src_orc #### A masked pattern was here #### 36214430891 -PREHOOK: query: -- 
replace columns for adding columns and type widening -insert overwrite table src_orc2 select * from src +PREHOOK: query: insert overwrite table src_orc2 select * from src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@src_orc2 -POSTHOOK: query: -- replace columns for adding columns and type widening -insert overwrite table src_orc2 select * from src +POSTHOOK: query: insert overwrite table src_orc2 select * from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@src_orc2 diff --git a/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out b/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out index 738abc4..35b204b 100644 --- a/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out +++ b/ql/src/test/results/clientpositive/orc_vectorization_ppd.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- create table with 1000 rows -create table srcorc(key string, value string) stored as textfile +PREHOOK: query: create table srcorc(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcorc -POSTHOOK: query: -- create table with 1000 rows -create table srcorc(key string, value string) stored as textfile +POSTHOOK: query: create table srcorc(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcorc @@ -28,8 +26,7 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@srcorc POSTHOOK: Lineage: srcorc.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: srcorc.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- load table with each row group having 1000 rows and stripe 1 & 2 having 5000 & 2000 rows respectively -create table if not exists vectororc +PREHOOK: query: create table if not exists vectororc (s1 string, s2 string, d double, @@ -38,8 +35,7 @@ 
stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="10 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@vectororc -POSTHOOK: query: -- load table with each row group having 1000 rows and stripe 1 & 2 having 5000 & 2000 rows respectively -create table if not exists vectororc +POSTHOOK: query: create table if not exists vectororc (s1 string, s2 string, d double, @@ -48,13 +44,11 @@ stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="10 POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@vectororc -PREHOOK: query: -- insert creates separate orc files -insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc +PREHOOK: query: insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc PREHOOK: type: QUERY PREHOOK: Input: default@srcorc PREHOOK: Output: default@vectororc -POSTHOOK: query: -- insert creates separate orc files -insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc +POSTHOOK: query: insert overwrite table vectororc select "apple", "a", rand(1), "zoo" from srcorc POSTHOOK: type: QUERY POSTHOOK: Input: default@srcorc POSTHOOK: Output: default@vectororc @@ -134,8 +128,7 @@ POSTHOOK: Lineage: vectororc.d EXPRESSION [] POSTHOOK: Lineage: vectororc.s1 EXPRESSION [] POSTHOOK: Lineage: vectororc.s2 SIMPLE [] POSTHOOK: Lineage: vectororc.s3 SIMPLE [] -PREHOOK: query: -- since vectororc table has multiple orc file we will load them into a single file using another table -create table if not exists testorc +PREHOOK: query: create table if not exists testorc (s1 string, s2 string, d double, @@ -144,8 +137,7 @@ stored as ORC tblproperties("orc.row.index.stride"="1000", "orc.stripe.size"="10 PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@testorc -POSTHOOK: query: -- since vectororc table has multiple orc file we will load them into a 
single file using another table -create table if not exists testorc +POSTHOOK: query: create table if not exists testorc (s1 string, s2 string, d double, @@ -166,98 +158,74 @@ POSTHOOK: Lineage: testorc.d SIMPLE [(vectororc)vectororc.FieldSchema(name:d, ty POSTHOOK: Lineage: testorc.s1 SIMPLE [(vectororc)vectororc.FieldSchema(name:s1, type:string, comment:null), ] POSTHOOK: Lineage: testorc.s2 SIMPLE [(vectororc)vectororc.FieldSchema(name:s2, type:string, comment:null), ] POSTHOOK: Lineage: testorc.s3 SIMPLE [(vectororc)vectororc.FieldSchema(name:s3, type:string, comment:null), ] -PREHOOK: query: -- row group (1,4) from stripe 1 and row group (1) from stripe 2 --- PPD ONLY -select count(*),int(sum(d)) from testorc where s1 is not null +PREHOOK: query: select count(*),int(sum(d)) from testorc where s1 is not null PREHOOK: type: QUERY PREHOOK: Input: default@testorc #### A masked pattern was here #### -POSTHOOK: query: -- row group (1,4) from stripe 1 and row group (1) from stripe 2 --- PPD ONLY -select count(*),int(sum(d)) from testorc where s1 is not null +POSTHOOK: query: select count(*),int(sum(d)) from testorc where s1 is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@testorc #### A masked pattern was here #### 3000 1505 -PREHOOK: query: -- VECTORIZATION + PPD -select count(*),int(sum(d)) from testorc where s1 is not null +PREHOOK: query: select count(*),int(sum(d)) from testorc where s1 is not null PREHOOK: type: QUERY PREHOOK: Input: default@testorc #### A masked pattern was here #### -POSTHOOK: query: -- VECTORIZATION + PPD -select count(*),int(sum(d)) from testorc where s1 is not null +POSTHOOK: query: select count(*),int(sum(d)) from testorc where s1 is not null POSTHOOK: type: QUERY POSTHOOK: Input: default@testorc #### A masked pattern was here #### 3000 1505 -PREHOOK: query: -- row group (2,3,5) from stripe 1 and row group (2) from stripe 2 --- PPD ONLY -select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") +PREHOOK: query: 
select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") PREHOOK: type: QUERY PREHOOK: Input: default@testorc #### A masked pattern was here #### -POSTHOOK: query: -- row group (2,3,5) from stripe 1 and row group (2) from stripe 2 --- PPD ONLY -select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") +POSTHOOK: query: select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") POSTHOOK: type: QUERY POSTHOOK: Input: default@testorc #### A masked pattern was here #### 4000 2006 -PREHOOK: query: -- VECTORIZATION + PPD -select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") +PREHOOK: query: select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") PREHOOK: type: QUERY PREHOOK: Input: default@testorc #### A masked pattern was here #### -POSTHOOK: query: -- VECTORIZATION + PPD -select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") +POSTHOOK: query: select count(*),int(sum(d)) from testorc where s2 in ("b", "c", "e", "g") POSTHOOK: type: QUERY POSTHOOK: Input: default@testorc #### A masked pattern was here #### 4000 2006 -PREHOOK: query: -- last row group of stripe 1 and first row group of stripe 2 --- PPD ONLY -select count(*),int(sum(d)) from testorc where s3="z" +PREHOOK: query: select count(*),int(sum(d)) from testorc where s3="z" PREHOOK: type: QUERY PREHOOK: Input: default@testorc #### A masked pattern was here #### -POSTHOOK: query: -- last row group of stripe 1 and first row group of stripe 2 --- PPD ONLY -select count(*),int(sum(d)) from testorc where s3="z" +POSTHOOK: query: select count(*),int(sum(d)) from testorc where s3="z" POSTHOOK: type: QUERY POSTHOOK: Input: default@testorc #### A masked pattern was here #### 2000 1011 -PREHOOK: query: -- VECTORIZATION + PPD -select count(*),int(sum(d)) from testorc where s3="z" +PREHOOK: query: select count(*),int(sum(d)) from testorc where s3="z" PREHOOK: type: QUERY PREHOOK: Input: default@testorc #### A masked 
pattern was here #### -POSTHOOK: query: -- VECTORIZATION + PPD -select count(*),int(sum(d)) from testorc where s3="z" +POSTHOOK: query: select count(*),int(sum(d)) from testorc where s3="z" POSTHOOK: type: QUERY POSTHOOK: Input: default@testorc #### A masked pattern was here #### 2000 1011 -PREHOOK: query: -- first row group of stripe 1 and last row group of stripe 2 --- PPD ONLY -select count(*),int(sum(d)) from testorc where s2="a" or s2="g" +PREHOOK: query: select count(*),int(sum(d)) from testorc where s2="a" or s2="g" PREHOOK: type: QUERY PREHOOK: Input: default@testorc #### A masked pattern was here #### -POSTHOOK: query: -- first row group of stripe 1 and last row group of stripe 2 --- PPD ONLY -select count(*),int(sum(d)) from testorc where s2="a" or s2="g" +POSTHOOK: query: select count(*),int(sum(d)) from testorc where s2="a" or s2="g" POSTHOOK: type: QUERY POSTHOOK: Input: default@testorc #### A masked pattern was here #### 2000 1006 -PREHOOK: query: -- VECTORIZATION + PPD -select count(*),int(sum(d)) from testorc where s2="a" or s2="g" +PREHOOK: query: select count(*),int(sum(d)) from testorc where s2="a" or s2="g" PREHOOK: type: QUERY PREHOOK: Input: default@testorc #### A masked pattern was here #### -POSTHOOK: query: -- VECTORIZATION + PPD -select count(*),int(sum(d)) from testorc where s2="a" or s2="g" +POSTHOOK: query: select count(*),int(sum(d)) from testorc where s2="a" or s2="g" POSTHOOK: type: QUERY POSTHOOK: Input: default@testorc #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/outer_join_ppr.q.out index cf20851..0d3b4f0 100644 --- a/ql/src/test/results/clientpositive/outer_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/outer_join_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM src a FULL OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN EXTENDED SELECT a.key, a.value, b.key, 
b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM src a FULL OUTER JOIN diff --git a/ql/src/test/results/clientpositive/parallel.q.out b/ql/src/test/results/clientpositive/parallel.q.out index 737ceda..459105e 100644 --- a/ql/src/test/results/clientpositive/parallel.q.out +++ b/ql/src/test/results/clientpositive/parallel.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table if not exists src_a like src +PREHOOK: query: create table if not exists src_a like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_a -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table if not exists src_a like src +POSTHOOK: query: create table if not exists src_a like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_a diff --git a/ql/src/test/results/clientpositive/parallel_join0.q.out b/ql/src/test/results/clientpositive/parallel_join0.q.out index 392412d..c02319e 100644 --- a/ql/src/test/results/clientpositive/parallel_join0.q.out +++ b/ql/src/test/results/clientpositive/parallel_join0.q.out @@ -1,7 +1,5 @@ Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Stage 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 @@ -9,9 +7,7 @@ SELECT src1.key as k1, src1.value as v1, (SELECT * FROM src WHERE src.key < 10) src2 SORT BY k1, v1, k2, v2 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 diff --git a/ql/src/test/results/clientpositive/parallel_join1.q.out 
b/ql/src/test/results/clientpositive/parallel_join1.q.out index fbe5f91..8843661 100644 --- a/ql/src/test/results/clientpositive/parallel_join1.q.out +++ b/ql/src/test/results/clientpositive/parallel_join1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/parquet_analyze.q.out b/ql/src/test/results/clientpositive/parquet_analyze.q.out index 4429c0a..d3cdc3f 100644 --- a/ql/src/test/results/clientpositive/parquet_analyze.q.out +++ b/ql/src/test/results/clientpositive/parquet_analyze.q.out @@ -67,12 +67,10 @@ POSTHOOK: Lineage: parquet_create_people.last_name SIMPLE [(parquet_create_peopl POSTHOOK: Lineage: parquet_create_people.salary SIMPLE [(parquet_create_people_staging)parquet_create_people_staging.FieldSchema(name:salary, type:decimal(10,0), comment:null), ] POSTHOOK: Lineage: parquet_create_people.start_date SIMPLE [(parquet_create_people_staging)parquet_create_people_staging.FieldSchema(name:start_date, type:timestamp, comment:null), ] POSTHOOK: Lineage: parquet_create_people.state SIMPLE [(parquet_create_people_staging)parquet_create_people_staging.FieldSchema(name:state, type:string, comment:null), ] -PREHOOK: query: -- describe the table first. This should contain un-updated stats. 
-DESC FORMATTED parquet_create_people +PREHOOK: query: DESC FORMATTED parquet_create_people PREHOOK: type: DESCTABLE PREHOOK: Input: default@parquet_create_people -POSTHOOK: query: -- describe the table first. This should contain un-updated stats. -DESC FORMATTED parquet_create_people +POSTHOOK: query: DESC FORMATTED parquet_create_people POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@parquet_create_people # col_name data_type comment @@ -109,13 +107,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- now run noscan and re-check the stats, and they should be updated. -ANALYZE TABLE parquet_create_people COMPUTE STATISTICS noscan +PREHOOK: query: ANALYZE TABLE parquet_create_people COMPUTE STATISTICS noscan PREHOOK: type: QUERY PREHOOK: Input: default@parquet_create_people PREHOOK: Output: default@parquet_create_people -POSTHOOK: query: -- now run noscan and re-check the stats, and they should be updated. -ANALYZE TABLE parquet_create_people COMPUTE STATISTICS noscan +POSTHOOK: query: ANALYZE TABLE parquet_create_people COMPUTE STATISTICS noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@parquet_create_people POSTHOOK: Output: default@parquet_create_people @@ -159,13 +155,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- clean up -DROP TABLE parquet_create_people_staging +PREHOOK: query: DROP TABLE parquet_create_people_staging PREHOOK: type: DROPTABLE PREHOOK: Input: default@parquet_create_people_staging PREHOOK: Output: default@parquet_create_people_staging -POSTHOOK: query: -- clean up -DROP TABLE parquet_create_people_staging +POSTHOOK: query: DROP TABLE parquet_create_people_staging POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@parquet_create_people_staging POSTHOOK: Output: default@parquet_create_people_staging diff --git a/ql/src/test/results/clientpositive/parquet_array_of_multi_field_struct.q.out 
b/ql/src/test/results/clientpositive/parquet_array_of_multi_field_struct.q.out index 61f38af..a46dca4 100644 --- a/ql/src/test/results/clientpositive/parquet_array_of_multi_field_struct.q.out +++ b/ql/src/test/results/clientpositive/parquet_array_of_multi_field_struct.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of multi-field structs - -CREATE TABLE parquet_array_of_multi_field_structs ( +PREHOOK: query: CREATE TABLE parquet_array_of_multi_field_structs ( locations ARRAY> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_array_of_multi_field_structs -POSTHOOK: query: -- this test creates a Parquet table with an array of multi-field structs - -CREATE TABLE parquet_array_of_multi_field_structs ( +POSTHOOK: query: CREATE TABLE parquet_array_of_multi_field_structs ( locations ARRAY> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE @@ -41,19 +37,13 @@ POSTHOOK: query: DROP TABLE parquet_array_of_multi_field_structs POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@parquet_array_of_multi_field_structs POSTHOOK: Output: default@parquet_array_of_multi_field_structs -PREHOOK: query: -- maps use the same writable structure, so validate that the data can be read --- as a map instead of an array of structs - -CREATE TABLE parquet_map_view_of_multi_field_structs ( +PREHOOK: query: CREATE TABLE parquet_map_view_of_multi_field_structs ( locations MAP ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_map_view_of_multi_field_structs -POSTHOOK: query: -- maps use the same writable structure, so validate that the data can be read --- as a map instead of an array of structs - -CREATE TABLE parquet_map_view_of_multi_field_structs ( +POSTHOOK: query: CREATE TABLE parquet_map_view_of_multi_field_structs ( locations MAP ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git 
a/ql/src/test/results/clientpositive/parquet_array_of_optional_elements.q.out b/ql/src/test/results/clientpositive/parquet_array_of_optional_elements.q.out index 5ccb1e7..2f7a0dc 100644 --- a/ql/src/test/results/clientpositive/parquet_array_of_optional_elements.q.out +++ b/ql/src/test/results/clientpositive/parquet_array_of_optional_elements.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of optional structs - -CREATE TABLE parquet_array_of_optional_elements ( +PREHOOK: query: CREATE TABLE parquet_array_of_optional_elements ( locations ARRAY> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_array_of_optional_elements -POSTHOOK: query: -- this test creates a Parquet table with an array of optional structs - -CREATE TABLE parquet_array_of_optional_elements ( +POSTHOOK: query: CREATE TABLE parquet_array_of_optional_elements ( locations ARRAY> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_array_of_required_elements.q.out b/ql/src/test/results/clientpositive/parquet_array_of_required_elements.q.out index 18165e8..6d8a559 100644 --- a/ql/src/test/results/clientpositive/parquet_array_of_required_elements.q.out +++ b/ql/src/test/results/clientpositive/parquet_array_of_required_elements.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of structs - -CREATE TABLE parquet_array_of_required_elements ( +PREHOOK: query: CREATE TABLE parquet_array_of_required_elements ( locations ARRAY> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_array_of_required_elements -POSTHOOK: query: -- this test creates a Parquet table with an array of structs - -CREATE TABLE parquet_array_of_required_elements ( +POSTHOOK: query: CREATE TABLE parquet_array_of_required_elements ( locations ARRAY> ) STORED AS PARQUET POSTHOOK: type: 
CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_array_of_single_field_struct.q.out b/ql/src/test/results/clientpositive/parquet_array_of_single_field_struct.q.out index 31c4c68..ba7e3ce 100644 --- a/ql/src/test/results/clientpositive/parquet_array_of_single_field_struct.q.out +++ b/ql/src/test/results/clientpositive/parquet_array_of_single_field_struct.q.out @@ -1,18 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of single-field structs --- that has an ambiguous Parquet schema that is assumed to be a list of bigints --- This is verifies compliance with the spec for this case. - -CREATE TABLE parquet_ambiguous_array_of_single_field_structs ( +PREHOOK: query: CREATE TABLE parquet_ambiguous_array_of_single_field_structs ( single_element_groups ARRAY ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_ambiguous_array_of_single_field_structs -POSTHOOK: query: -- this test creates a Parquet table with an array of single-field structs --- that has an ambiguous Parquet schema that is assumed to be a list of bigints --- This is verifies compliance with the spec for this case. 
- -CREATE TABLE parquet_ambiguous_array_of_single_field_structs ( +POSTHOOK: query: CREATE TABLE parquet_ambiguous_array_of_single_field_structs ( single_element_groups ARRAY ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_array_of_structs.q.out b/ql/src/test/results/clientpositive/parquet_array_of_structs.q.out index d3ac709..2d00f2f 100644 --- a/ql/src/test/results/clientpositive/parquet_array_of_structs.q.out +++ b/ql/src/test/results/clientpositive/parquet_array_of_structs.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of structs - -CREATE TABLE parquet_array_of_structs ( +PREHOOK: query: CREATE TABLE parquet_array_of_structs ( locations ARRAY> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_array_of_structs -POSTHOOK: query: -- this test creates a Parquet table with an array of structs - -CREATE TABLE parquet_array_of_structs ( +POSTHOOK: query: CREATE TABLE parquet_array_of_structs ( locations ARRAY> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_array_of_unannotated_groups.q.out b/ql/src/test/results/clientpositive/parquet_array_of_unannotated_groups.q.out index af116fa..5df79b1 100644 --- a/ql/src/test/results/clientpositive/parquet_array_of_unannotated_groups.q.out +++ b/ql/src/test/results/clientpositive/parquet_array_of_unannotated_groups.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table from a structure with an unannotated --- repeated structure of (x,y) structs - -CREATE TABLE parquet_array_of_unannotated_groups ( +PREHOOK: query: CREATE TABLE parquet_array_of_unannotated_groups ( list_of_points ARRAY> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_array_of_unannotated_groups -POSTHOOK: query: -- this test creates a Parquet table from a 
structure with an unannotated --- repeated structure of (x,y) structs - -CREATE TABLE parquet_array_of_unannotated_groups ( +POSTHOOK: query: CREATE TABLE parquet_array_of_unannotated_groups ( list_of_points ARRAY> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_array_of_unannotated_primitives.q.out b/ql/src/test/results/clientpositive/parquet_array_of_unannotated_primitives.q.out index d2be684..587a122 100644 --- a/ql/src/test/results/clientpositive/parquet_array_of_unannotated_primitives.q.out +++ b/ql/src/test/results/clientpositive/parquet_array_of_unannotated_primitives.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table from a structure with an unannotated --- repeated structure of int32s - -CREATE TABLE parquet_array_of_unannotated_ints ( +PREHOOK: query: CREATE TABLE parquet_array_of_unannotated_ints ( list_of_ints ARRAY ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_array_of_unannotated_ints -POSTHOOK: query: -- this test creates a Parquet table from a structure with an unannotated --- repeated structure of int32s - -CREATE TABLE parquet_array_of_unannotated_ints ( +POSTHOOK: query: CREATE TABLE parquet_array_of_unannotated_ints ( list_of_ints ARRAY ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_avro_array_of_primitives.q.out b/ql/src/test/results/clientpositive/parquet_avro_array_of_primitives.q.out index 0776853..916dbb4 100644 --- a/ql/src/test/results/clientpositive/parquet_avro_array_of_primitives.q.out +++ b/ql/src/test/results/clientpositive/parquet_avro_array_of_primitives.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of structs - -CREATE TABLE parquet_avro_array_of_primitives ( +PREHOOK: query: CREATE TABLE parquet_avro_array_of_primitives ( list_of_ints ARRAY ) STORED AS PARQUET PREHOOK: type: 
CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_avro_array_of_primitives -POSTHOOK: query: -- this test creates a Parquet table with an array of structs - -CREATE TABLE parquet_avro_array_of_primitives ( +POSTHOOK: query: CREATE TABLE parquet_avro_array_of_primitives ( list_of_ints ARRAY ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_avro_array_of_single_field_struct.q.out b/ql/src/test/results/clientpositive/parquet_avro_array_of_single_field_struct.q.out index cb460b5..03205f4 100644 --- a/ql/src/test/results/clientpositive/parquet_avro_array_of_single_field_struct.q.out +++ b/ql/src/test/results/clientpositive/parquet_avro_array_of_single_field_struct.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of single-field structs --- as written by parquet-avro - -CREATE TABLE parquet_avro_array_of_single_field_structs ( +PREHOOK: query: CREATE TABLE parquet_avro_array_of_single_field_structs ( single_element_groups ARRAY> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_avro_array_of_single_field_structs -POSTHOOK: query: -- this test creates a Parquet table with an array of single-field structs --- as written by parquet-avro - -CREATE TABLE parquet_avro_array_of_single_field_structs ( +POSTHOOK: query: CREATE TABLE parquet_avro_array_of_single_field_structs ( single_element_groups ARRAY> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_join.q.out b/ql/src/test/results/clientpositive/parquet_join.q.out index 86fb64a..c10ca19 100644 --- a/ql/src/test/results/clientpositive/parquet_join.q.out +++ b/ql/src/test/results/clientpositive/parquet_join.q.out @@ -61,17 +61,9 @@ POSTHOOK: Output: default@parquet_jointable2 POSTHOOK: Lineage: parquet_jointable2.c1 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, 
comment:null), ] POSTHOOK: Lineage: parquet_jointable2.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: parquet_jointable2.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - --- MR join - -explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +PREHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- MR join - -explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +POSTHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -151,21 +143,9 @@ POSTHOOK: Input: default@parquet_jointable2 #### A masked pattern was here #### val_0value val_10value -PREHOOK: query: -- The two tables involved in the join have differing number of columns(table1-2,table2-3). In case of Map and SMB join, --- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table --- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to second/current table - --- Map join - -explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +PREHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The two tables involved in the join have differing number of columns(table1-2,table2-3). 
In case of Map and SMB join, --- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table --- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to second/current table - --- Map join - -explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +POSTHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -249,15 +229,11 @@ POSTHOOK: Input: default@parquet_jointable2 #### A masked pattern was here #### val_0value val_10value -PREHOOK: query: -- SMB join - -create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet +PREHOOK: query: create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_jointable1_bucketed_sorted -POSTHOOK: query: -- SMB join - -create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet +POSTHOOK: query: create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_jointable1_bucketed_sorted diff --git a/ql/src/test/results/clientpositive/parquet_map_null.q.out b/ql/src/test/results/clientpositive/parquet_map_null.q.out index d1357c1..f2a4304 100644 --- a/ql/src/test/results/clientpositive/parquet_map_null.q.out +++ b/ql/src/test/results/clientpositive/parquet_map_null.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- This test attempts to 
write a parquet table from an avro table that contains map null values - -DROP TABLE IF EXISTS avro_table +PREHOOK: query: DROP TABLE IF EXISTS avro_table PREHOOK: type: DROPTABLE -POSTHOOK: query: -- This test attempts to write a parquet table from an avro table that contains map null values - -DROP TABLE IF EXISTS avro_table +POSTHOOK: query: DROP TABLE IF EXISTS avro_table POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE IF EXISTS parquet_table PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out b/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out index 8d5b8f2..d76f92e 100644 --- a/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out +++ b/ql/src/test/results/clientpositive/parquet_map_of_arrays_of_ints.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- this test reads and writes a parquet file with a map of arrays of ints --- validates PARQUET-26 is fixed - -CREATE TABLE parquet_map_of_arrays_of_ints ( +PREHOOK: query: CREATE TABLE parquet_map_of_arrays_of_ints ( examples MAP> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_map_of_arrays_of_ints -POSTHOOK: query: -- this test reads and writes a parquet file with a map of arrays of ints --- validates PARQUET-26 is fixed - -CREATE TABLE parquet_map_of_arrays_of_ints ( +POSTHOOK: query: CREATE TABLE parquet_map_of_arrays_of_ints ( examples MAP> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out b/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out index 4c26b45..242fa40 100644 --- a/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out +++ b/ql/src/test/results/clientpositive/parquet_map_of_maps.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- this test reads and writes a parquet file with a map of maps - -CREATE TABLE parquet_map_of_maps ( +PREHOOK: query: CREATE TABLE 
parquet_map_of_maps ( map_of_maps MAP> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_map_of_maps -POSTHOOK: query: -- this test reads and writes a parquet file with a map of maps - -CREATE TABLE parquet_map_of_maps ( +POSTHOOK: query: CREATE TABLE parquet_map_of_maps ( map_of_maps MAP> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_mixed_partition_formats.q.out b/ql/src/test/results/clientpositive/parquet_mixed_partition_formats.q.out index e96aa80..20fcb93 100644 --- a/ql/src/test/results/clientpositive/parquet_mixed_partition_formats.q.out +++ b/ql/src/test/results/clientpositive/parquet_mixed_partition_formats.q.out @@ -48,13 +48,11 @@ MAP KEYS TERMINATED BY ':' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_mixed_partition_formats -PREHOOK: query: ---- partition dateint=20140330 is stored as TEXTFILE -LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_mixed_partition_formats PARTITION (dateint=20140330) +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_mixed_partition_formats PARTITION (dateint=20140330) PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@parquet_mixed_partition_formats -POSTHOOK: query: ---- partition dateint=20140330 is stored as TEXTFILE -LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_mixed_partition_formats PARTITION (dateint=20140330) +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_mixed_partition_formats PARTITION (dateint=20140330) POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@parquet_mixed_partition_formats @@ -144,9 +142,7 @@ Storage Desc Params: field.delim | mapkey.delim : serialization.format | -PREHOOK: 
query: ---change table serde and file format to PARQUET---- - -ALTER TABLE parquet_mixed_partition_formats +PREHOOK: query: ALTER TABLE parquet_mixed_partition_formats SET FILEFORMAT INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat' @@ -154,9 +150,7 @@ ALTER TABLE parquet_mixed_partition_formats PREHOOK: type: ALTERTABLE_FILEFORMAT PREHOOK: Input: default@parquet_mixed_partition_formats PREHOOK: Output: default@parquet_mixed_partition_formats -POSTHOOK: query: ---change table serde and file format to PARQUET---- - -ALTER TABLE parquet_mixed_partition_formats +POSTHOOK: query: ALTER TABLE parquet_mixed_partition_formats SET FILEFORMAT INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat' diff --git a/ql/src/test/results/clientpositive/parquet_nested_complex.q.out b/ql/src/test/results/clientpositive/parquet_nested_complex.q.out index d7ef637..d1f0edc 100644 --- a/ql/src/test/results/clientpositive/parquet_nested_complex.q.out +++ b/ql/src/test/results/clientpositive/parquet_nested_complex.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- start with the original nestedcomplex test - -create table nestedcomplex ( +PREHOOK: query: create table nestedcomplex ( simple_int int, max_nested_array array>>>>>>>>>>>>>>>>>>>>>>, max_nested_map array>>>>>>>>>>>>>>>>>>>>>, @@ -15,9 +13,7 @@ WITH SERDEPROPERTIES ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nestedcomplex -POSTHOOK: query: -- start with the original nestedcomplex test - -create table nestedcomplex ( +POSTHOOK: query: create table nestedcomplex ( simple_int int, max_nested_array array>>>>>>>>>>>>>>>>>>>>>>, max_nested_map array>>>>>>>>>>>>>>>>>>>>>, @@ -64,16 +60,12 @@ POSTHOOK: query: load data local inpath '../../data/files/nested_complex.txt' ov POSTHOOK: type: LOAD #### A masked 
pattern was here #### POSTHOOK: Output: default@nestedcomplex -PREHOOK: query: -- and load the table into Parquet - -CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM nestedcomplex +PREHOOK: query: CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM nestedcomplex PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@nestedcomplex PREHOOK: Output: database:default PREHOOK: Output: default@parquet_nested_complex -POSTHOOK: query: -- and load the table into Parquet - -CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM nestedcomplex +POSTHOOK: query: CREATE TABLE parquet_nested_complex STORED AS PARQUET AS SELECT * FROM nestedcomplex POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@nestedcomplex POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/parquet_partitioned.q.out b/ql/src/test/results/clientpositive/parquet_partitioned.q.out index 3529d70..7eace7d 100644 --- a/ql/src/test/results/clientpositive/parquet_partitioned.q.out +++ b/ql/src/test/results/clientpositive/parquet_partitioned.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE parquet_partitioned_staging +PREHOOK: query: DROP TABLE parquet_partitioned_staging PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE parquet_partitioned_staging +POSTHOOK: query: DROP TABLE parquet_partitioned_staging POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE parquet_partitioned PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out index c1565f8..4dba227 100644 --- a/ql/src/test/results/clientpositive/parquet_ppd_char.q.out +++ b/ql/src/test/results/clientpositive/parquet_ppd_char.q.out @@ -18,13 +18,11 @@ POSTHOOK: Lineage: newtypestbl.c EXPRESSION [] POSTHOOK: Lineage: newtypestbl.d EXPRESSION [] POSTHOOK: Lineage: newtypestbl.da EXPRESSION [] POSTHOOK: 
Lineage: newtypestbl.v EXPRESSION [] -PREHOOK: query: -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where c="apple" +PREHOOK: query: select * from newtypestbl where c="apple" PREHOOK: type: QUERY PREHOOK: Input: default@newtypestbl #### A masked pattern was here #### -POSTHOOK: query: -- char data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where c="apple" +POSTHOOK: query: select * from newtypestbl where c="apple" POSTHOOK: type: QUERY POSTHOOK: Input: default@newtypestbl #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out index 55231e9..821b4b2 100644 --- a/ql/src/test/results/clientpositive/parquet_ppd_date.q.out +++ b/ql/src/test/results/clientpositive/parquet_ppd_date.q.out @@ -18,13 +18,11 @@ POSTHOOK: Lineage: newtypestbl.c EXPRESSION [] POSTHOOK: Lineage: newtypestbl.d EXPRESSION [] POSTHOOK: Lineage: newtypestbl.da EXPRESSION [] POSTHOOK: Lineage: newtypestbl.v EXPRESSION [] -PREHOOK: query: -- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where da='1970-02-20' +PREHOOK: query: select * from newtypestbl where da='1970-02-20' PREHOOK: type: QUERY PREHOOK: Input: default@newtypestbl #### A masked pattern was here #### -POSTHOOK: query: -- date data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where da='1970-02-20' +POSTHOOK: query: select * from newtypestbl where da='1970-02-20' POSTHOOK: type: QUERY POSTHOOK: Input: default@newtypestbl #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out index a30820e..c2611fc 100644 --- a/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out +++ 
b/ql/src/test/results/clientpositive/parquet_ppd_decimal.q.out @@ -18,13 +18,11 @@ POSTHOOK: Lineage: newtypestbl.c EXPRESSION [] POSTHOOK: Lineage: newtypestbl.d EXPRESSION [] POSTHOOK: Lineage: newtypestbl.da EXPRESSION [] POSTHOOK: Lineage: newtypestbl.v EXPRESSION [] -PREHOOK: query: -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where d=0.22 +PREHOOK: query: select * from newtypestbl where d=0.22 PREHOOK: type: QUERY PREHOOK: Input: default@newtypestbl #### A masked pattern was here #### -POSTHOOK: query: -- decimal data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where d=0.22 +POSTHOOK: query: select * from newtypestbl where d=0.22 POSTHOOK: type: QUERY POSTHOOK: Input: default@newtypestbl #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/parquet_ppd_partition.q.out b/ql/src/test/results/clientpositive/parquet_ppd_partition.q.out index 44613dd..08f8558 100644 --- a/ql/src/test/results/clientpositive/parquet_ppd_partition.q.out +++ b/ql/src/test/results/clientpositive/parquet_ppd_partition.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Test predicate with partitioned columns -CREATE TABLE part1 (id int, content string) PARTITIONED BY (p string) STORED AS PARQUET +PREHOOK: query: CREATE TABLE part1 (id int, content string) PARTITIONED BY (p string) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part1 -POSTHOOK: query: -- Test predicate with partitioned columns -CREATE TABLE part1 (id int, content string) PARTITIONED BY (p string) STORED AS PARQUET +POSTHOOK: query: CREATE TABLE part1 (id int, content string) PARTITIONED BY (p string) STORED AS PARQUET POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@part1 diff --git a/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out 
b/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out index 3b3e5b7..979c7b8 100644 --- a/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out +++ b/ql/src/test/results/clientpositive/parquet_ppd_timestamp.q.out @@ -18,13 +18,11 @@ POSTHOOK: Lineage: newtypestbl.c EXPRESSION [] POSTHOOK: Lineage: newtypestbl.d EXPRESSION [] POSTHOOK: Lineage: newtypestbl.ts EXPRESSION [] POSTHOOK: Lineage: newtypestbl.v EXPRESSION [] -PREHOOK: query: -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01' +PREHOOK: query: select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01' PREHOOK: type: QUERY PREHOOK: Input: default@newtypestbl #### A masked pattern was here #### -POSTHOOK: query: -- timestamp data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01' +POSTHOOK: query: select * from newtypestbl where cast(ts as string)='2011-01-01 01:01:01' POSTHOOK: type: QUERY POSTHOOK: Input: default@newtypestbl #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out b/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out index 5a62e80..b836244 100644 --- a/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out +++ b/ql/src/test/results/clientpositive/parquet_ppd_varchar.q.out @@ -18,13 +18,11 @@ POSTHOOK: Lineage: newtypestbl.c EXPRESSION [] POSTHOOK: Lineage: newtypestbl.d EXPRESSION [] POSTHOOK: Lineage: newtypestbl.da EXPRESSION [] POSTHOOK: Lineage: newtypestbl.v EXPRESSION [] -PREHOOK: query: -- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where v="bee" +PREHOOK: query: select * from newtypestbl where v="bee" PREHOOK: type: QUERY PREHOOK: Input: default@newtypestbl #### A masked pattern was here #### -POSTHOOK: 
query: -- varchar data types (EQUAL, NOT_EQUAL, LESS_THAN, LESS_THAN_EQUALS, IN, BETWEEN tests) -select * from newtypestbl where v="bee" +POSTHOOK: query: select * from newtypestbl where v="bee" POSTHOOK: type: QUERY POSTHOOK: Input: default@newtypestbl #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/parquet_read_backward_compatible_files.q.out b/ql/src/test/results/clientpositive/parquet_read_backward_compatible_files.q.out index 0c36359..22a050a 100644 --- a/ql/src/test/results/clientpositive/parquet_read_backward_compatible_files.q.out +++ b/ql/src/test/results/clientpositive/parquet_read_backward_compatible_files.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- This test makes sure that parquet can read older parquet files written by Hive <= 0.12 --- alltypesparquet is a files written by older version of Hive - -CREATE TABLE alltypesparquet ( +PREHOOK: query: CREATE TABLE alltypesparquet ( bo1 boolean, ti1 tinyint, si1 smallint, @@ -17,10 +14,7 @@ CREATE TABLE alltypesparquet ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alltypesparquet -POSTHOOK: query: -- This test makes sure that parquet can read older parquet files written by Hive <= 0.12 --- alltypesparquet is a files written by older version of Hive - -CREATE TABLE alltypesparquet ( +POSTHOOK: query: CREATE TABLE alltypesparquet ( bo1 boolean, ti1 tinyint, si1 smallint, diff --git a/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out b/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out index 07595d2..43d75dc 100644 --- a/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out +++ b/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out @@ -1,14 +1,6 @@ -PREHOOK: query: -- Some tables might have extra columns and struct elements on the schema than the on Parquet schema; --- This is called 'schema evolution' as the Parquet file is not ready yet for such new columns; --- Hive should 
support this schema, and return NULL values instead; - -DROP TABLE NewStructField +PREHOOK: query: DROP TABLE NewStructField PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Some tables might have extra columns and struct elements on the schema than the on Parquet schema; --- This is called 'schema evolution' as the Parquet file is not ready yet for such new columns; --- Hive should support this schema, and return NULL values instead; - -DROP TABLE NewStructField +POSTHOOK: query: DROP TABLE NewStructField POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE NewStructFieldTable PREHOOK: type: DROPTABLE @@ -59,13 +51,11 @@ POSTHOOK: Input: default@newstructfield {"a1":{"k1":"v1"},"a2":{"e1":5}} {"a1":{"k1":"v1"},"a2":{"e1":5}} {"a1":{"k1":"v1"},"a2":{"e1":5}} -PREHOOK: query: -- Adds new fields to the struct types -ALTER TABLE NewStructField REPLACE COLUMNS (a struct, a2:struct, a3:int>, b int) +PREHOOK: query: ALTER TABLE NewStructField REPLACE COLUMNS (a struct, a2:struct, a3:int>, b int) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@newstructfield PREHOOK: Output: default@newstructfield -POSTHOOK: query: -- Adds new fields to the struct types -ALTER TABLE NewStructField REPLACE COLUMNS (a struct, a2:struct, a3:int>, b int) +POSTHOOK: query: ALTER TABLE NewStructField REPLACE COLUMNS (a struct, a2:struct, a3:int>, b int) POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@newstructfield POSTHOOK: Output: default@newstructfield @@ -90,14 +80,12 @@ POSTHOOK: Input: default@newstructfield {"a1":{"k1":"v1"},"a2":{"e1":5,"e2":null},"a3":null} NULL {"a1":{"k1":"v1"},"a2":{"e1":5,"e2":null},"a3":null} NULL {"a1":{"k1":"v1"},"a2":{"e1":5,"e2":null},"a3":null} NULL -PREHOOK: query: -- Makes sure that new parquet tables contain the new struct field -CREATE TABLE NewStructFieldTable STORED AS PARQUET AS SELECT * FROM NewStructField +PREHOOK: query: CREATE TABLE NewStructFieldTable STORED AS PARQUET AS SELECT * FROM NewStructField PREHOOK: type: 
CREATETABLE_AS_SELECT PREHOOK: Input: default@newstructfield PREHOOK: Output: database:default PREHOOK: Output: default@NewStructFieldTable -POSTHOOK: query: -- Makes sure that new parquet tables contain the new struct field -CREATE TABLE NewStructFieldTable STORED AS PARQUET AS SELECT * FROM NewStructField +POSTHOOK: query: CREATE TABLE NewStructFieldTable STORED AS PARQUET AS SELECT * FROM NewStructField POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@newstructfield POSTHOOK: Output: database:default @@ -125,13 +113,9 @@ POSTHOOK: Input: default@newstructfieldtable {"a1":{"k1":"v1"},"a2":{"e1":5,"e2":null},"a3":null} NULL {"a1":{"k1":"v1"},"a2":{"e1":5,"e2":null},"a3":null} NULL {"a1":{"k1":"v1"},"a2":{"e1":5,"e2":null},"a3":null} NULL -PREHOOK: query: -- test if the order of fields in array> changes, it works fine - -DROP TABLE IF EXISTS schema_test +PREHOOK: query: DROP TABLE IF EXISTS schema_test PREHOOK: type: DROPTABLE -POSTHOOK: query: -- test if the order of fields in array> changes, it works fine - -DROP TABLE IF EXISTS schema_test +POSTHOOK: query: DROP TABLE IF EXISTS schema_test POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE schema_test (msg array>, b: array>>>) STORED AS PARQUET PREHOOK: type: CREATETABLE @@ -162,13 +146,11 @@ POSTHOOK: Input: default@schema_test #### A masked pattern was here #### [{"f1":"abc","f2":"abc2","a":[{"a1":"a1","a2":"a2"}],"b":[{"b1":1,"b2":2}]}] [{"f1":"abc","f2":"abc2","a":[{"a1":"a1","a2":"a2"}],"b":[{"b1":1,"b2":2}]}] -PREHOOK: query: -- Order of fields swapped -ALTER TABLE schema_test CHANGE msg msg array>, b: array>, f2: string, f1: string>> +PREHOOK: query: ALTER TABLE schema_test CHANGE msg msg array>, b: array>, f2: string, f1: string>> PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@schema_test PREHOOK: Output: default@schema_test -POSTHOOK: query: -- Order of fields swapped -ALTER TABLE schema_test CHANGE msg msg array>, b: array>, f2: string, f1: string>> +POSTHOOK: query: 
ALTER TABLE schema_test CHANGE msg msg array>, b: array>, f2: string, f1: string>> POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@schema_test POSTHOOK: Output: default@schema_test diff --git a/ql/src/test/results/clientpositive/parquet_serde.q.out b/ql/src/test/results/clientpositive/parquet_serde.q.out index 6d5f0f8..43c9bd0 100644 --- a/ql/src/test/results/clientpositive/parquet_serde.q.out +++ b/ql/src/test/results/clientpositive/parquet_serde.q.out @@ -22,15 +22,11 @@ FIELDS TERMINATED BY '|' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_mixed_fileformat -PREHOOK: query: ---- partition dateint=20140330 is stored as TEXTFILE - -LOAD DATA LOCAL INPATH '../../data/files/parquet_partitioned.txt' OVERWRITE INTO TABLE parquet_mixed_fileformat PARTITION (dateint=20140330) +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_partitioned.txt' OVERWRITE INTO TABLE parquet_mixed_fileformat PARTITION (dateint=20140330) PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@parquet_mixed_fileformat -POSTHOOK: query: ---- partition dateint=20140330 is stored as TEXTFILE - -LOAD DATA LOCAL INPATH '../../data/files/parquet_partitioned.txt' OVERWRITE INTO TABLE parquet_mixed_fileformat PARTITION (dateint=20140330) +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/parquet_partitioned.txt' OVERWRITE INTO TABLE parquet_mixed_fileformat PARTITION (dateint=20140330) POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@parquet_mixed_fileformat @@ -88,15 +84,11 @@ Sort Columns: [] Storage Desc Params: field.delim | serialization.format | -PREHOOK: query: ---change table serde and file format to PARQUET---- - -ALTER TABLE parquet_mixed_fileformat set SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' +PREHOOK: query: ALTER TABLE parquet_mixed_fileformat set SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' 
PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: default@parquet_mixed_fileformat PREHOOK: Output: default@parquet_mixed_fileformat -POSTHOOK: query: ---change table serde and file format to PARQUET---- - -ALTER TABLE parquet_mixed_fileformat set SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' +POSTHOOK: query: ALTER TABLE parquet_mixed_fileformat set SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: default@parquet_mixed_fileformat POSTHOOK: Output: default@parquet_mixed_fileformat diff --git a/ql/src/test/results/clientpositive/parquet_table_with_subschema.q.out b/ql/src/test/results/clientpositive/parquet_table_with_subschema.q.out index c6b57f4..0abcd0e 100644 --- a/ql/src/test/results/clientpositive/parquet_table_with_subschema.q.out +++ b/ql/src/test/results/clientpositive/parquet_table_with_subschema.q.out @@ -1,22 +1,12 @@ -PREHOOK: query: -- Sometimes, the user wants to create a table from just a portion of the file schema; --- This test makes sure that this scenario works; - -DROP TABLE test +PREHOOK: query: DROP TABLE test PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Sometimes, the user wants to create a table from just a portion of the file schema; --- This test makes sure that this scenario works; - -DROP TABLE test +POSTHOOK: query: DROP TABLE test POSTHOOK: type: DROPTABLE -PREHOOK: query: -- Current file schema is: (id int, name string, address struct); --- Creates a table from just a portion of the file schema, including struct elements (test lower/upper case as well) -CREATE TABLE test (Name string, address struct) STORED AS PARQUET +PREHOOK: query: CREATE TABLE test (Name string, address struct) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test -POSTHOOK: query: -- Current file schema is: (id int, name string, address struct); --- Creates a table from just a portion of the file schema, 
including struct elements (test lower/upper case as well) -CREATE TABLE test (Name string, address struct) STORED AS PARQUET +POSTHOOK: query: CREATE TABLE test (Name string, address struct) STORED AS PARQUET POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test diff --git a/ql/src/test/results/clientpositive/parquet_thrift_array_of_primitives.q.out b/ql/src/test/results/clientpositive/parquet_thrift_array_of_primitives.q.out index f2b53c5..e97de66 100644 --- a/ql/src/test/results/clientpositive/parquet_thrift_array_of_primitives.q.out +++ b/ql/src/test/results/clientpositive/parquet_thrift_array_of_primitives.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of structs - -CREATE TABLE parquet_thrift_array_of_primitives ( +PREHOOK: query: CREATE TABLE parquet_thrift_array_of_primitives ( list_of_ints ARRAY ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_thrift_array_of_primitives -POSTHOOK: query: -- this test creates a Parquet table with an array of structs - -CREATE TABLE parquet_thrift_array_of_primitives ( +POSTHOOK: query: CREATE TABLE parquet_thrift_array_of_primitives ( list_of_ints ARRAY ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_thrift_array_of_single_field_struct.q.out b/ql/src/test/results/clientpositive/parquet_thrift_array_of_single_field_struct.q.out index 46473ab..2e29380 100644 --- a/ql/src/test/results/clientpositive/parquet_thrift_array_of_single_field_struct.q.out +++ b/ql/src/test/results/clientpositive/parquet_thrift_array_of_single_field_struct.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- this test creates a Parquet table with an array of single-field structs --- as written by parquet-thrift - -CREATE TABLE parquet_thrift_array_of_single_field_structs ( +PREHOOK: query: CREATE TABLE parquet_thrift_array_of_single_field_structs ( 
single_element_groups ARRAY> ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_thrift_array_of_single_field_structs -POSTHOOK: query: -- this test creates a Parquet table with an array of single-field structs --- as written by parquet-thrift - -CREATE TABLE parquet_thrift_array_of_single_field_structs ( +POSTHOOK: query: CREATE TABLE parquet_thrift_array_of_single_field_structs ( single_element_groups ARRAY> ) STORED AS PARQUET POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/parquet_type_promotion.q.out b/ql/src/test/results/clientpositive/parquet_type_promotion.q.out index 71c271d..c4ed002 100644 --- a/ql/src/test/results/clientpositive/parquet_type_promotion.q.out +++ b/ql/src/test/results/clientpositive/parquet_type_promotion.q.out @@ -201,19 +201,9 @@ POSTHOOK: Input: default@parquet_type_promotion 200 250.0 255.0 5643.0 7777.0 0.4000000059604645 0.8 {"k2":14} {"k2":13.0} [8,17,24] [15.0,17.0,19.0] {"c1":20,"c2":20} {"v11":5.0} [3.299999952316284,3.0999999046325684,5.599999904632568] {"c1":5.800000190734863,"c2":4.7} 300 350.0 355.0 7643.0 8888.0 0.4000000059604645 0.9 {"k3":12} {"k3":19.0} [9,17,25] [21.0,23.0,25.0] {"c1":30,"c2":60} {"b11":6.0} [4.300000190734863,3.200000047683716,5.699999809265137] {"c1":5.900000095367432,"c2":4.6} 400 450.0 455.0 8643.0 9999.0 0.4000000059604645 0.5 {"k4":15} {"k4":23.0} [7,18,27] [27.0,29.0,31.0] {"c1":50,"c2":70} {"d11":8.0} [6.300000190734863,3.299999952316284,5.800000190734863] {"c1":5.0,"c2":4.5} -PREHOOK: query: -- This test covers the case where array> data --- can be retrieved useing map. --- This also test if there are more than 2 fields in array_of_struct --- which can be read as map as well. - -DROP TABLE arrays_of_struct_to_map +PREHOOK: query: DROP TABLE arrays_of_struct_to_map PREHOOK: type: DROPTABLE -POSTHOOK: query: -- This test covers the case where array> data --- can be retrieved useing map. 
--- This also test if there are more than 2 fields in array_of_struct --- which can be read as map as well. - -DROP TABLE arrays_of_struct_to_map +POSTHOOK: query: DROP TABLE arrays_of_struct_to_map POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE arrays_of_struct_to_map (locations1 array>, locations2 array>) STORED AS PARQUET @@ -246,14 +236,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@arrays_of_struct_to_map #### A masked pattern was here #### [{"c1":1,"c2":2}] [{"f1":77,"f2":88,"f3":99}] -PREHOOK: query: -- Testing schema evolution of dropping column from array> -ALTER TABLE arrays_of_struct_to_map REPLACE COLUMNS (locations1 array>, locations2 +PREHOOK: query: ALTER TABLE arrays_of_struct_to_map REPLACE COLUMNS (locations1 array>, locations2 array>) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@arrays_of_struct_to_map PREHOOK: Output: default@arrays_of_struct_to_map -POSTHOOK: query: -- Testing schema evolution of dropping column from array> -ALTER TABLE arrays_of_struct_to_map REPLACE COLUMNS (locations1 array>, locations2 +POSTHOOK: query: ALTER TABLE arrays_of_struct_to_map REPLACE COLUMNS (locations1 array>, locations2 array>) POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@arrays_of_struct_to_map @@ -267,14 +255,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@arrays_of_struct_to_map #### A masked pattern was here #### [{"c1":1}] [{"f2":88}] -PREHOOK: query: -- Testing schema evolution of adding columns into array> -ALTER TABLE arrays_of_struct_to_map REPLACE COLUMNS (locations1 array>, locations2 +PREHOOK: query: ALTER TABLE arrays_of_struct_to_map REPLACE COLUMNS (locations1 array>, locations2 array>) PREHOOK: type: ALTERTABLE_REPLACECOLS PREHOOK: Input: default@arrays_of_struct_to_map PREHOOK: Output: default@arrays_of_struct_to_map -POSTHOOK: query: -- Testing schema evolution of adding columns into array> -ALTER TABLE arrays_of_struct_to_map REPLACE COLUMNS (locations1 array>, locations2 +POSTHOOK: 
query: ALTER TABLE arrays_of_struct_to_map REPLACE COLUMNS (locations1 array>, locations2 array>) POSTHOOK: type: ALTERTABLE_REPLACECOLS POSTHOOK: Input: default@arrays_of_struct_to_map diff --git a/ql/src/test/results/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q.out b/ql/src/test/results/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q.out index a9f5e48..5a64053 100644 --- a/ql/src/test/results/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q.out +++ b/ql/src/test/results/clientpositive/parquet_types_non_dictionary_encoding_vectorization.q.out @@ -432,9 +432,7 @@ POSTHOOK: Lineage: parquet_types.l1 SIMPLE [(parquet_types_staging)parquet_types POSTHOOK: Lineage: parquet_types.m1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:m1, type:map, comment:null), ] POSTHOOK: Lineage: parquet_types.st1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:st1, type:struct, comment:null), ] POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ] -PREHOOK: query: -- test types in group by - -EXPLAIN SELECT ctinyint, +PREHOOK: query: EXPLAIN SELECT ctinyint, MAX(cint), MIN(csmallint), COUNT(cstring1), @@ -444,9 +442,7 @@ FROM parquet_types GROUP BY ctinyint ORDER BY ctinyint PREHOOK: type: QUERY -POSTHOOK: query: -- test types in group by - -EXPLAIN SELECT ctinyint, +POSTHOOK: query: EXPLAIN SELECT ctinyint, MAX(cint), MIN(csmallint), COUNT(cstring1), diff --git a/ql/src/test/results/clientpositive/parquet_types_vectorization.q.out b/ql/src/test/results/clientpositive/parquet_types_vectorization.q.out index 7818d73..9f2fbb2 100644 --- a/ql/src/test/results/clientpositive/parquet_types_vectorization.q.out +++ b/ql/src/test/results/clientpositive/parquet_types_vectorization.q.out @@ -152,9 +152,7 @@ POSTHOOK: Lineage: parquet_types.l1 SIMPLE [(parquet_types_staging)parquet_types POSTHOOK: 
Lineage: parquet_types.m1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:m1, type:map, comment:null), ] POSTHOOK: Lineage: parquet_types.st1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:st1, type:struct, comment:null), ] POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ] -PREHOOK: query: -- test types in group by - -EXPLAIN SELECT ctinyint, +PREHOOK: query: EXPLAIN SELECT ctinyint, MAX(cint), MIN(csmallint), COUNT(cstring1), @@ -164,9 +162,7 @@ FROM parquet_types GROUP BY ctinyint ORDER BY ctinyint PREHOOK: type: QUERY -POSTHOOK: query: -- test types in group by - -EXPLAIN SELECT ctinyint, +POSTHOOK: query: EXPLAIN SELECT ctinyint, MAX(cint), MIN(csmallint), COUNT(cstring1), diff --git a/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out b/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out index de58369..ee06d50 100644 --- a/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out +++ b/ql/src/test/results/clientpositive/parquet_write_correct_definition_levels.q.out @@ -8,31 +8,27 @@ STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@text_tbl -PREHOOK: query: -- This inserts one NULL row -INSERT OVERWRITE TABLE text_tbl +PREHOOK: query: INSERT OVERWRITE TABLE text_tbl SELECT IF(false, named_struct("b", named_struct("c", 1)), NULL) FROM src LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@text_tbl -POSTHOOK: query: -- This inserts one NULL row -INSERT OVERWRITE TABLE text_tbl +POSTHOOK: query: INSERT OVERWRITE TABLE text_tbl SELECT IF(false, named_struct("b", named_struct("c", 1)), NULL) FROM src LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@text_tbl POSTHOOK: Lineage: text_tbl.a EXPRESSION [] -PREHOOK: query: -- We test 
that parquet is written with a level 0 definition -CREATE TABLE parq_tbl +PREHOOK: query: CREATE TABLE parq_tbl STORED AS PARQUET AS SELECT * FROM text_tbl PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@text_tbl PREHOOK: Output: database:default PREHOOK: Output: default@parq_tbl -POSTHOOK: query: -- We test that parquet is written with a level 0 definition -CREATE TABLE parq_tbl +POSTHOOK: query: CREATE TABLE parq_tbl STORED AS PARQUET AS SELECT * FROM text_tbl POSTHOOK: type: CREATETABLE_AS_SELECT diff --git a/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out b/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out index 876d0db..077b1f1 100644 --- a/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out +++ b/ql/src/test/results/clientpositive/part_inherit_tbl_props.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- The property needs to be unset at the end of the test till HIVE-3109/HIVE-3112 is fixed - -create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') +PREHOOK: query: create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@mytbl -POSTHOOK: query: -- The property needs to be unset at the end of the test till HIVE-3109/HIVE-3112 is fixed - -create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') +POSTHOOK: query: create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@mytbl diff --git a/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out b/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out index 0be588a..6036e5c 100644 --- 
a/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out +++ b/ql/src/test/results/clientpositive/part_inherit_tbl_props_with_star.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- The property needs to be unset at the end of the test till HIVE-3109/HIVE-3112 is fixed - -create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') +PREHOOK: query: create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@mytbl -POSTHOOK: query: -- The property needs to be unset at the end of the test till HIVE-3109/HIVE-3112 is fixed - -create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') +POSTHOOK: query: create table mytbl (c1 tinyint) partitioned by (c2 string) tblproperties ('a'='myval','b'='yourval','c'='noval') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@mytbl diff --git a/ql/src/test/results/clientpositive/partition_boolexpr.q.out b/ql/src/test/results/clientpositive/partition_boolexpr.q.out index 8e75910..0f2a85d 100644 --- a/ql/src/test/results/clientpositive/partition_boolexpr.q.out +++ b/ql/src/test/results/clientpositive/partition_boolexpr.q.out @@ -1,20 +1,16 @@ -PREHOOK: query: -- create testing table. -create table part_boolexpr(key int, value string) partitioned by (dt int, ts string) +PREHOOK: query: create table part_boolexpr(key int, value string) partitioned by (dt int, ts string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_boolexpr -POSTHOOK: query: -- create testing table. 
-create table part_boolexpr(key int, value string) partitioned by (dt int, ts string) +POSTHOOK: query: create table part_boolexpr(key int, value string) partitioned by (dt int, ts string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@part_boolexpr -PREHOOK: query: -- both the below queries should return 0 rows -select count(*) from part_boolexpr where key = 'abc' +PREHOOK: query: select count(*) from part_boolexpr where key = 'abc' PREHOOK: type: QUERY PREHOOK: Input: default@part_boolexpr #### A masked pattern was here #### -POSTHOOK: query: -- both the below queries should return 0 rows -select count(*) from part_boolexpr where key = 'abc' +POSTHOOK: query: select count(*) from part_boolexpr where key = 'abc' POSTHOOK: type: QUERY POSTHOOK: Input: default@part_boolexpr #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out index 06c178c..61d6423 100644 --- a/ql/src/test/results/clientpositive/partition_coltype_literals.q.out +++ b/ql/src/test/results/clientpositive/partition_coltype_literals.q.out @@ -10,21 +10,17 @@ POSTHOOK: query: create table partcoltypenum (key int, value string) partitioned POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partcoltypenum -PREHOOK: query: -- add partition -alter table partcoltypenum add partition(tint=100Y, sint=20000S, bint=300000000000L) +PREHOOK: query: alter table partcoltypenum add partition(tint=100Y, sint=20000S, bint=300000000000L) PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@partcoltypenum -POSTHOOK: query: -- add partition -alter table partcoltypenum add partition(tint=100Y, sint=20000S, bint=300000000000L) +POSTHOOK: query: alter table partcoltypenum add partition(tint=100Y, sint=20000S, bint=300000000000L) POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: default@partcoltypenum 
POSTHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 -PREHOOK: query: -- describe partition -describe formatted partcoltypenum partition (tint=100, sint=20000S, bint='300000000000') +PREHOOK: query: describe formatted partcoltypenum partition (tint=100, sint=20000S, bint='300000000000') PREHOOK: type: DESCTABLE PREHOOK: Input: default@partcoltypenum -POSTHOOK: query: -- describe partition -describe formatted partcoltypenum partition (tint=100, sint=20000S, bint='300000000000') +POSTHOOK: query: describe formatted partcoltypenum partition (tint=100, sint=20000S, bint='300000000000') POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@partcoltypenum # col_name data_type comment @@ -62,13 +58,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- change partition file format -alter table partcoltypenum partition(tint=100, sint=20000S, bint='300000000000') set fileformat rcfile +PREHOOK: query: alter table partcoltypenum partition(tint=100, sint=20000S, bint='300000000000') set fileformat rcfile PREHOOK: type: ALTERPARTITION_FILEFORMAT PREHOOK: Input: default@partcoltypenum PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 -POSTHOOK: query: -- change partition file format -alter table partcoltypenum partition(tint=100, sint=20000S, bint='300000000000') set fileformat rcfile +POSTHOOK: query: alter table partcoltypenum partition(tint=100, sint=20000S, bint='300000000000') set fileformat rcfile POSTHOOK: type: ALTERPARTITION_FILEFORMAT POSTHOOK: Input: default@partcoltypenum POSTHOOK: Input: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 @@ -115,13 +109,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- change partition clusterby, sortby and bucket -alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) clustered by (key) sorted by (key desc) into 4 buckets +PREHOOK: query: 
alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) clustered by (key) sorted by (key desc) into 4 buckets PREHOOK: type: ALTERTABLE_CLUSTER_SORT PREHOOK: Input: default@partcoltypenum PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 -POSTHOOK: query: -- change partition clusterby, sortby and bucket -alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) clustered by (key) sorted by (key desc) into 4 buckets +POSTHOOK: query: alter table partcoltypenum partition(tint='100', sint=20000, bint=300000000000L) clustered by (key) sorted by (key desc) into 4 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@partcoltypenum POSTHOOK: Input: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 @@ -168,13 +160,11 @@ Bucket Columns: [key] Sort Columns: [Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- rename partition -alter table partcoltypenum partition(tint=100, sint=20000, bint=300000000000) rename to partition (tint=110Y, sint=22000S, bint=330000000000L) +PREHOOK: query: alter table partcoltypenum partition(tint=100, sint=20000, bint=300000000000) rename to partition (tint=110Y, sint=22000S, bint=330000000000L) PREHOOK: type: ALTERTABLE_RENAMEPART PREHOOK: Input: default@partcoltypenum PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 -POSTHOOK: query: -- rename partition -alter table partcoltypenum partition(tint=100, sint=20000, bint=300000000000) rename to partition (tint=110Y, sint=22000S, bint=330000000000L) +POSTHOOK: query: alter table partcoltypenum partition(tint=100, sint=20000, bint=300000000000) rename to partition (tint=110Y, sint=22000S, bint=330000000000L) POSTHOOK: type: ALTERTABLE_RENAMEPART POSTHOOK: Input: default@partcoltypenum POSTHOOK: Input: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 @@ -222,13 +212,11 @@ Bucket Columns: [key] Sort Columns: 
[Order(col:key, order:0)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- insert partition -insert into partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) select key, value from src limit 10 +PREHOOK: query: insert into partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) select key, value from src limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 -POSTHOOK: query: -- insert partition -insert into partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) select key, value from src limit 10 +POSTHOOK: query: insert into partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) select key, value from src limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 @@ -244,14 +232,12 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 POSTHOOK: Lineage: partcoltypenum PARTITION(tint=110,sint=22000,bint=330000000000).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: partcoltypenum PARTITION(tint=110,sint=22000,bint=330000000000).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- select partition -select count(1) from partcoltypenum where tint=110Y and sint=22000S and bint=330000000000L +PREHOOK: query: select count(1) from partcoltypenum where tint=110Y and sint=22000S and bint=330000000000L PREHOOK: type: QUERY PREHOOK: Input: default@partcoltypenum PREHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 #### A masked pattern was here #### -POSTHOOK: query: -- select partition -select count(1) from partcoltypenum where tint=110Y and sint=22000S and bint=330000000000L +POSTHOOK: query: select count(1) from partcoltypenum where tint=110Y and sint=22000S and 
bint=330000000000L POSTHOOK: type: QUERY POSTHOOK: Input: default@partcoltypenum POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 @@ -268,15 +254,13 @@ POSTHOOK: Input: default@partcoltypenum POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 #### A masked pattern was here #### 30 -PREHOOK: query: -- analyze partition statistics and columns statistics -analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics +PREHOOK: query: analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics PREHOOK: type: QUERY PREHOOK: Input: default@partcoltypenum PREHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 PREHOOK: Output: default@partcoltypenum PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 -POSTHOOK: query: -- analyze partition statistics and columns statistics -analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics +POSTHOOK: query: analyze table partcoltypenum partition (tint=110Y, sint=22000S, bint=330000000000L) compute statistics POSTHOOK: type: QUERY POSTHOOK: Input: default@partcoltypenum POSTHOOK: Input: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 @@ -330,13 +314,11 @@ POSTHOOK: Input: default@partcoltypenum # col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment value string 0 18 6.766666666666667 7 from deserializer -PREHOOK: query: -- change table column type for partition -alter table partcoltypenum change key key decimal(10,0) +PREHOOK: query: alter table partcoltypenum change key key decimal(10,0) PREHOOK: type: ALTERTABLE_RENAMECOL PREHOOK: Input: default@partcoltypenum PREHOOK: Output: default@partcoltypenum -POSTHOOK: query: -- change table column type for partition -alter table partcoltypenum change key key decimal(10,0) +POSTHOOK: query: alter 
table partcoltypenum change key key decimal(10,0) POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@partcoltypenum POSTHOOK: Output: default@partcoltypenum @@ -391,12 +373,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- change partititon column type -alter table partcoltypenum partition column (tint decimal(3,0)) +PREHOOK: query: alter table partcoltypenum partition column (tint decimal(3,0)) PREHOOK: type: ALTERTABLE_PARTCOLTYPE PREHOOK: Input: default@partcoltypenum -POSTHOOK: query: -- change partititon column type -alter table partcoltypenum partition column (tint decimal(3,0)) +POSTHOOK: query: alter table partcoltypenum partition column (tint decimal(3,0)) POSTHOOK: type: ALTERTABLE_PARTCOLTYPE POSTHOOK: Input: default@partcoltypenum POSTHOOK: Output: default@partcoltypenum @@ -442,22 +422,18 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- show partition -show partitions partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L) +PREHOOK: query: show partitions partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L) PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@partcoltypenum -POSTHOOK: query: -- show partition -show partitions partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L) +POSTHOOK: query: show partitions partcoltypenum partition (tint=110BD, sint=22000S, bint=330000000000L) POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@partcoltypenum tint=110/sint=22000/bint=330000000000 -PREHOOK: query: -- drop partition -alter table partcoltypenum drop partition (tint=110BD, sint=22000S, bint=330000000000L) +PREHOOK: query: alter table partcoltypenum drop partition (tint=110BD, sint=22000S, bint=330000000000L) PREHOOK: type: ALTERTABLE_DROPPARTS PREHOOK: Input: default@partcoltypenum PREHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 -POSTHOOK: query: -- 
drop partition -alter table partcoltypenum drop partition (tint=110BD, sint=22000S, bint=330000000000L) +POSTHOOK: query: alter table partcoltypenum drop partition (tint=110BD, sint=22000S, bint=330000000000L) POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@partcoltypenum POSTHOOK: Output: default@partcoltypenum@tint=110/sint=22000/bint=330000000000 @@ -467,13 +443,11 @@ PREHOOK: Input: default@partcoltypenum POSTHOOK: query: show partitions partcoltypenum POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@partcoltypenum -PREHOOK: query: -- change partition file location -insert into partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L) select key, value from src limit 10 +PREHOOK: query: insert into partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L) select key, value from src limit 10 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 -POSTHOOK: query: -- change partition file location -insert into partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L) select key, value from src limit 10 +POSTHOOK: query: insert into partcoltypenum partition (tint=100BD, sint=20000S, bint=300000000000L) select key, value from src limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@partcoltypenum@tint=100/sint=20000/bint=300000000000 diff --git a/ql/src/test/results/clientpositive/partition_date.q.out b/ql/src/test/results/clientpositive/partition_date.q.out index f6584ee..bbb8a04 100644 --- a/ql/src/test/results/clientpositive/partition_date.q.out +++ b/ql/src/test/results/clientpositive/partition_date.q.out @@ -106,175 +106,137 @@ POSTHOOK: Input: default@partition_date_1@dt=2000-01-01/region=2 27 val_27 2000-01-01 2 311 val_311 2000-01-01 2 86 val_86 2000-01-01 2 -PREHOOK: query: -- 15 -select count(*) from partition_date_1 where dt = date '2000-01-01' +PREHOOK: query: select count(*) from 
partition_date_1 where dt = date '2000-01-01' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 15 -select count(*) from partition_date_1 where dt = date '2000-01-01' +POSTHOOK: query: select count(*) from partition_date_1 where dt = date '2000-01-01' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 15 -PREHOOK: query: -- 15. Also try with string value in predicate -select count(*) from partition_date_1 where dt = '2000-01-01' +PREHOOK: query: select count(*) from partition_date_1 where dt = '2000-01-01' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 15. Also try with string value in predicate -select count(*) from partition_date_1 where dt = '2000-01-01' +POSTHOOK: query: select count(*) from partition_date_1 where dt = '2000-01-01' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 15 -PREHOOK: query: -- 5 -select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '2' +PREHOOK: query: select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '2' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 5 -select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '2' +POSTHOOK: query: select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 5 -PREHOOK: query: -- 11 -select count(*) from partition_date_1 where dt = date '2013-08-08' and region = '10' +PREHOOK: query: select count(*) from partition_date_1 where dt = date '2013-08-08' and region = '10' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 11 -select 
count(*) from partition_date_1 where dt = date '2013-08-08' and region = '10' +POSTHOOK: query: select count(*) from partition_date_1 where dt = date '2013-08-08' and region = '10' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 11 -PREHOOK: query: -- 30 -select count(*) from partition_date_1 where region = '1' +PREHOOK: query: select count(*) from partition_date_1 where region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 30 -select count(*) from partition_date_1 where region = '1' +POSTHOOK: query: select count(*) from partition_date_1 where region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 30 -PREHOOK: query: -- 0 -select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '3' +PREHOOK: query: select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '3' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 0 -select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '3' +POSTHOOK: query: select count(*) from partition_date_1 where dt = date '2000-01-01' and region = '3' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 0 -PREHOOK: query: -- 0 -select count(*) from partition_date_1 where dt = date '1999-01-01' +PREHOOK: query: select count(*) from partition_date_1 where dt = date '1999-01-01' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 0 -select count(*) from partition_date_1 where dt = date '1999-01-01' +POSTHOOK: query: select count(*) from partition_date_1 where dt = date '1999-01-01' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 0 -PREHOOK: query: -- Try other 
comparison operations - --- 20 -select count(*) from partition_date_1 where dt > date '2000-01-01' and region = '1' +PREHOOK: query: select count(*) from partition_date_1 where dt > date '2000-01-01' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- Try other comparison operations - --- 20 -select count(*) from partition_date_1 where dt > date '2000-01-01' and region = '1' +POSTHOOK: query: select count(*) from partition_date_1 where dt > date '2000-01-01' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 20 -PREHOOK: query: -- 10 -select count(*) from partition_date_1 where dt < date '2000-01-02' and region = '1' +PREHOOK: query: select count(*) from partition_date_1 where dt < date '2000-01-02' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_date_1 where dt < date '2000-01-02' and region = '1' +POSTHOOK: query: select count(*) from partition_date_1 where dt < date '2000-01-02' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- 20 -select count(*) from partition_date_1 where dt >= date '2000-01-02' and region = '1' +PREHOOK: query: select count(*) from partition_date_1 where dt >= date '2000-01-02' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 20 -select count(*) from partition_date_1 where dt >= date '2000-01-02' and region = '1' +POSTHOOK: query: select count(*) from partition_date_1 where dt >= date '2000-01-02' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 20 -PREHOOK: query: -- 10 -select count(*) from partition_date_1 where dt <= date 
'2000-01-01' and region = '1' +PREHOOK: query: select count(*) from partition_date_1 where dt <= date '2000-01-01' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_date_1 where dt <= date '2000-01-01' and region = '1' +POSTHOOK: query: select count(*) from partition_date_1 where dt <= date '2000-01-01' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- 20 -select count(*) from partition_date_1 where dt <> date '2000-01-01' and region = '1' +PREHOOK: query: select count(*) from partition_date_1 where dt <> date '2000-01-01' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 20 -select count(*) from partition_date_1 where dt <> date '2000-01-01' and region = '1' +POSTHOOK: query: select count(*) from partition_date_1 where dt <> date '2000-01-01' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 20 -PREHOOK: query: -- 10 -select count(*) from partition_date_1 where dt between date '1999-12-30' and date '2000-01-03' and region = '1' +PREHOOK: query: select count(*) from partition_date_1 where dt between date '1999-12-30' and date '2000-01-03' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_date_1 where dt between date '1999-12-30' and date '2000-01-03' and region = '1' +POSTHOOK: query: select count(*) from partition_date_1 where dt between date '1999-12-30' and date '2000-01-03' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- Try a string key with date-like strings - --- 5 -select count(*) from 
partition_date_1 where region = '2020-20-20' +PREHOOK: query: select count(*) from partition_date_1 where region = '2020-20-20' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- Try a string key with date-like strings - --- 5 -select count(*) from partition_date_1 where region = '2020-20-20' +POSTHOOK: query: select count(*) from partition_date_1 where region = '2020-20-20' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### 5 -PREHOOK: query: -- 5 -select count(*) from partition_date_1 where region > '2010-01-01' +PREHOOK: query: select count(*) from partition_date_1 where region > '2010-01-01' PREHOOK: type: QUERY PREHOOK: Input: default@partition_date_1 #### A masked pattern was here #### -POSTHOOK: query: -- 5 -select count(*) from partition_date_1 where region > '2010-01-01' +POSTHOOK: query: select count(*) from partition_date_1 where region > '2010-01-01' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_date_1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/partition_date2.q.out b/ql/src/test/results/clientpositive/partition_date2.q.out index f4fdb47..e8bd9e9 100644 --- a/ql/src/test/results/clientpositive/partition_date2.q.out +++ b/ql/src/test/results/clientpositive/partition_date2.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table partition_date2_1 +PREHOOK: query: drop table partition_date2_1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table partition_date2_1 +POSTHOOK: query: drop table partition_date2_1 POSTHOOK: type: DROPTABLE PREHOOK: query: create table partition_date2_1 (key string, value string) partitioned by (dt date, region int) PREHOOK: type: CREATETABLE @@ -14,8 +10,7 @@ POSTHOOK: query: create table partition_date2_1 (key string, value string) parti POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@partition_date2_1 -PREHOOK: query: -- test date literal syntax -from (select * from src tablesample (1 rows)) x +PREHOOK: query: from (select * from src tablesample (1 rows)) x insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1) select * insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select * insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2) select * @@ -24,8 +19,7 @@ PREHOOK: Input: default@src PREHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2 PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=1 PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2 -POSTHOOK: query: -- test date literal syntax -from (select * from src tablesample (1 rows)) x +POSTHOOK: query: from (select * from src tablesample (1 rows)) x insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=1) select * insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select * insert overwrite table partition_date2_1 partition(dt=date '1999-01-01', region=2) select * @@ -73,14 +67,12 @@ POSTHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=2 238 val_238 1999-01-01 2 238 val_238 2000-01-01 1 238 val_238 2000-01-01 2 -PREHOOK: query: -- insert overwrite -insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) +PREHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select 'changed_key', 'changed_value' from src tablesample (2 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2 -POSTHOOK: query: -- insert overwrite -insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) +POSTHOOK: query: insert overwrite table partition_date2_1 partition(dt=date '2000-01-01', region=2) select 'changed_key', 'changed_value' from src tablesample (2 rows) 
POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -105,12 +97,10 @@ POSTHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=2 238 val_238 2000-01-01 1 changed_key changed_value 2000-01-01 2 changed_key changed_value 2000-01-01 2 -PREHOOK: query: -- truncate -truncate table partition_date2_1 partition(dt=date '2000-01-01', region=2) +PREHOOK: query: truncate table partition_date2_1 partition(dt=date '2000-01-01', region=2) PREHOOK: type: TRUNCATETABLE PREHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2 -POSTHOOK: query: -- truncate -truncate table partition_date2_1 partition(dt=date '2000-01-01', region=2) +POSTHOOK: query: truncate table partition_date2_1 partition(dt=date '2000-01-01', region=2) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Output: default@partition_date2_1@dt=2000-01-01/region=2 PREHOOK: query: select distinct dt from partition_date2_1 @@ -145,12 +135,10 @@ POSTHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=2 #### A masked pattern was here #### 238 val_238 1999-01-01 2 238 val_238 2000-01-01 1 -PREHOOK: query: -- alter table add partition -alter table partition_date2_1 add partition (dt=date '1980-01-02', region=3) +PREHOOK: query: alter table partition_date2_1 add partition (dt=date '1980-01-02', region=3) PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@partition_date2_1 -POSTHOOK: query: -- alter table add partition -alter table partition_date2_1 add partition (dt=date '1980-01-02', region=3) +POSTHOOK: query: alter table partition_date2_1 add partition (dt=date '1980-01-02', region=3) POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: default@partition_date2_1 POSTHOOK: Output: default@partition_date2_1@dt=1980-01-02/region=3 @@ -190,13 +178,11 @@ POSTHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=2 #### A masked pattern was here #### 238 val_238 1999-01-01 2 238 val_238 2000-01-01 1 -PREHOOK: query: -- alter table drop -alter table partition_date2_1 drop partition (dt=date 
'1999-01-01', region=2) +PREHOOK: query: alter table partition_date2_1 drop partition (dt=date '1999-01-01', region=2) PREHOOK: type: ALTERTABLE_DROPPARTS PREHOOK: Input: default@partition_date2_1 PREHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2 -POSTHOOK: query: -- alter table drop -alter table partition_date2_1 drop partition (dt=date '1999-01-01', region=2) +POSTHOOK: query: alter table partition_date2_1 drop partition (dt=date '1999-01-01', region=2) POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@partition_date2_1 POSTHOOK: Output: default@partition_date2_1@dt=1999-01-01/region=2 @@ -230,27 +216,23 @@ POSTHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=1 POSTHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=2 #### A masked pattern was here #### 238 val_238 2000-01-01 1 -PREHOOK: query: -- alter table set serde -alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) +PREHOOK: query: alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' PREHOOK: type: ALTERPARTITION_SERIALIZER PREHOOK: Input: default@partition_date2_1 PREHOOK: Output: default@partition_date2_1@dt=1980-01-02/region=3 -POSTHOOK: query: -- alter table set serde -alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) +POSTHOOK: query: alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' POSTHOOK: type: ALTERPARTITION_SERIALIZER POSTHOOK: Input: default@partition_date2_1 POSTHOOK: Input: default@partition_date2_1@dt=1980-01-02/region=3 POSTHOOK: Output: default@partition_date2_1@dt=1980-01-02/region=3 -PREHOOK: query: -- alter table set fileformat -alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) +PREHOOK: query: alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) set fileformat rcfile PREHOOK: type: 
ALTERPARTITION_FILEFORMAT PREHOOK: Input: default@partition_date2_1 PREHOOK: Output: default@partition_date2_1@dt=1980-01-02/region=3 -POSTHOOK: query: -- alter table set fileformat -alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) +POSTHOOK: query: alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) set fileformat rcfile POSTHOOK: type: ALTERPARTITION_FILEFORMAT POSTHOOK: Input: default@partition_date2_1 @@ -303,15 +285,13 @@ POSTHOOK: Input: default@partition_date2_1@dt=2000-01-01/region=2 238 val_238 1980-01-02 3 238 val_238 2000-01-01 1 86 val_86 1980-01-02 3 -PREHOOK: query: -- alter table set location -alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) +PREHOOK: query: alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) #### A masked pattern was here #### PREHOOK: type: ALTERPARTITION_LOCATION PREHOOK: Input: default@partition_date2_1 PREHOOK: Output: default@partition_date2_1@dt=1980-01-02/region=3 #### A masked pattern was here #### -POSTHOOK: query: -- alter table set location -alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) +POSTHOOK: query: alter table partition_date2_1 partition(dt=date '1980-01-02', region=3) #### A masked pattern was here #### POSTHOOK: type: ALTERPARTITION_LOCATION POSTHOOK: Input: default@partition_date2_1 @@ -336,13 +316,11 @@ dt date region int #### A masked pattern was here #### -PREHOOK: query: -- alter table touch -alter table partition_date2_1 touch partition(dt=date '1980-01-02', region=3) +PREHOOK: query: alter table partition_date2_1 touch partition(dt=date '1980-01-02', region=3) PREHOOK: type: ALTERTABLE_TOUCH PREHOOK: Input: default@partition_date2_1 PREHOOK: Output: default@partition_date2_1@dt=1980-01-02/region=3 -POSTHOOK: query: -- alter table touch -alter table partition_date2_1 touch partition(dt=date '1980-01-02', region=3) +POSTHOOK: query: alter table partition_date2_1 touch partition(dt=date '1980-01-02', 
region=3) POSTHOOK: type: ALTERTABLE_TOUCH POSTHOOK: Input: default@partition_date2_1 POSTHOOK: Input: default@partition_date2_1@dt=1980-01-02/region=3 diff --git a/ql/src/test/results/clientpositive/partition_timestamp.q.out b/ql/src/test/results/clientpositive/partition_timestamp.q.out index b32d98d..34f70a5 100644 --- a/ql/src/test/results/clientpositive/partition_timestamp.q.out +++ b/ql/src/test/results/clientpositive/partition_timestamp.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows. --- EXCLUDE_OS_WINDOWS -drop table partition_timestamp_1 +PREHOOK: query: drop table partition_timestamp_1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows. --- EXCLUDE_OS_WINDOWS -drop table partition_timestamp_1 +POSTHOOK: query: drop table partition_timestamp_1 POSTHOOK: type: DROPTABLE PREHOOK: query: create table partition_timestamp_1 (key string, value string) partitioned by (dt timestamp, region string) PREHOOK: type: CREATETABLE @@ -105,175 +101,137 @@ POSTHOOK: query: select * from partition_timestamp_1 where dt = '2000-01-01 01:0 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -PREHOOK: query: -- 10 -select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- 10. 
Also try with string value in predicate -select count(*) from partition_timestamp_1 where dt = '2000-01-01 01:00:00' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt = '2000-01-01 01:00:00' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10. Also try with string value in predicate -select count(*) from partition_timestamp_1 where dt = '2000-01-01 01:00:00' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt = '2000-01-01 01:00:00' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- 5 -select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 02:00:00' and region = '2' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 02:00:00' and region = '2' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 5 -select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 02:00:00' and region = '2' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 02:00:00' and region = '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 5 -PREHOOK: query: -- 11 -select count(*) from partition_timestamp_1 where dt = timestamp '2001-01-01 03:00:00' and region = '10' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '2001-01-01 03:00:00' and region = '10' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 11 -select count(*) from partition_timestamp_1 where dt = timestamp '2001-01-01 03:00:00' and region = '10' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '2001-01-01 03:00:00' and region = '10' POSTHOOK: type: QUERY POSTHOOK: 
Input: default@partition_timestamp_1 #### A masked pattern was here #### 11 -PREHOOK: query: -- 30 -select count(*) from partition_timestamp_1 where region = '1' +PREHOOK: query: select count(*) from partition_timestamp_1 where region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 30 -select count(*) from partition_timestamp_1 where region = '1' +POSTHOOK: query: select count(*) from partition_timestamp_1 where region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 30 -PREHOOK: query: -- 0 -select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' and region = '3' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' and region = '3' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 0 -select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' and region = '3' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '2000-01-01 01:00:00' and region = '3' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 0 -PREHOOK: query: -- 0 -select count(*) from partition_timestamp_1 where dt = timestamp '1999-01-01 01:00:00' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '1999-01-01 01:00:00' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 0 -select count(*) from partition_timestamp_1 where dt = timestamp '1999-01-01 01:00:00' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt = timestamp '1999-01-01 01:00:00' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 0 -PREHOOK: query: -- Try other comparison 
operations - --- 20 -select count(*) from partition_timestamp_1 where dt > timestamp '2000-01-01 01:00:00' and region = '1' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt > timestamp '2000-01-01 01:00:00' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- Try other comparison operations - --- 20 -select count(*) from partition_timestamp_1 where dt > timestamp '2000-01-01 01:00:00' and region = '1' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt > timestamp '2000-01-01 01:00:00' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 20 -PREHOOK: query: -- 10 -select count(*) from partition_timestamp_1 where dt < timestamp '2000-01-02 01:00:00' and region = '1' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt < timestamp '2000-01-02 01:00:00' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_timestamp_1 where dt < timestamp '2000-01-02 01:00:00' and region = '1' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt < timestamp '2000-01-02 01:00:00' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- 20 -select count(*) from partition_timestamp_1 where dt >= timestamp '2000-01-02 01:00:00' and region = '1' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt >= timestamp '2000-01-02 01:00:00' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 20 -select count(*) from partition_timestamp_1 where dt >= timestamp '2000-01-02 01:00:00' and region = '1' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt 
>= timestamp '2000-01-02 01:00:00' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 20 -PREHOOK: query: -- 10 -select count(*) from partition_timestamp_1 where dt <= timestamp '2000-01-01 01:00:00' and region = '1' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt <= timestamp '2000-01-01 01:00:00' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_timestamp_1 where dt <= timestamp '2000-01-01 01:00:00' and region = '1' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt <= timestamp '2000-01-01 01:00:00' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- 20 -select count(*) from partition_timestamp_1 where dt <> timestamp '2000-01-01 01:00:00' and region = '1' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt <> timestamp '2000-01-01 01:00:00' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 20 -select count(*) from partition_timestamp_1 where dt <> timestamp '2000-01-01 01:00:00' and region = '1' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt <> timestamp '2000-01-01 01:00:00' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 20 -PREHOOK: query: -- 10 -select count(*) from partition_timestamp_1 where dt between timestamp '1999-12-30 12:00:00' and timestamp '2000-01-03 12:00:00' and region = '1' +PREHOOK: query: select count(*) from partition_timestamp_1 where dt between timestamp '1999-12-30 12:00:00' and timestamp '2000-01-03 12:00:00' and region = '1' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked 
pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_timestamp_1 where dt between timestamp '1999-12-30 12:00:00' and timestamp '2000-01-03 12:00:00' and region = '1' +POSTHOOK: query: select count(*) from partition_timestamp_1 where dt between timestamp '1999-12-30 12:00:00' and timestamp '2000-01-03 12:00:00' and region = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- Try a string key with timestamp-like strings - --- 5 -select count(*) from partition_timestamp_1 where region = '2020-20-20' +PREHOOK: query: select count(*) from partition_timestamp_1 where region = '2020-20-20' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- Try a string key with timestamp-like strings - --- 5 -select count(*) from partition_timestamp_1 where region = '2020-20-20' +POSTHOOK: query: select count(*) from partition_timestamp_1 where region = '2020-20-20' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### 5 -PREHOOK: query: -- 5 -select count(*) from partition_timestamp_1 where region > '2010-01-01' +PREHOOK: query: select count(*) from partition_timestamp_1 where region > '2010-01-01' PREHOOK: type: QUERY PREHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### -POSTHOOK: query: -- 5 -select count(*) from partition_timestamp_1 where region > '2010-01-01' +POSTHOOK: query: select count(*) from partition_timestamp_1 where region > '2010-01-01' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_timestamp_1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/partition_timestamp2.q.out b/ql/src/test/results/clientpositive/partition_timestamp2.q.out index f32538f..63ca197 100644 --- a/ql/src/test/results/clientpositive/partition_timestamp2.q.out +++ 
b/ql/src/test/results/clientpositive/partition_timestamp2.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows. --- EXCLUDE_OS_WINDOWS -drop table partition_timestamp2_1 +PREHOOK: query: drop table partition_timestamp2_1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Exclude test on Windows due to space character being escaped in Hive paths on Windows. --- EXCLUDE_OS_WINDOWS -drop table partition_timestamp2_1 +POSTHOOK: query: drop table partition_timestamp2_1 POSTHOOK: type: DROPTABLE PREHOOK: query: create table partition_timestamp2_1 (key string, value string) partitioned by (dt timestamp, region int) PREHOOK: type: CREATETABLE @@ -14,8 +10,7 @@ POSTHOOK: query: create table partition_timestamp2_1 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partition_timestamp2_1 -PREHOOK: query: -- test timestamp literal syntax -from (select * from src tablesample (1 rows)) x +PREHOOK: query: from (select * from src tablesample (1 rows)) x insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) select * insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 01:00:00', region=1) select * insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1999-01-01 00:00:00', region=2) select * @@ -26,8 +21,7 @@ PREHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 00%3A00%3A00.0/reg PREHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00.0/region=2 PREHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00.0/region=1 PREHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00.0/region=1 -POSTHOOK: query: -- test timestamp literal syntax -from (select * from src tablesample (1 rows)) x +POSTHOOK: query: from (select * from src tablesample (1 rows)) x insert overwrite table partition_timestamp2_1 
partition(dt=timestamp '2000-01-01 00:00:00', region=1) select * insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 01:00:00', region=1) select * insert overwrite table partition_timestamp2_1 partition(dt=timestamp '1999-01-01 00:00:00', region=2) select * @@ -86,14 +80,12 @@ POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00.0/reg 238 val_238 1999-01-01 01:00:00 2 238 val_238 2000-01-01 00:00:00 1 238 val_238 2000-01-01 01:00:00 1 -PREHOOK: query: -- insert overwrite -insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) +PREHOOK: query: insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) select 'changed_key', 'changed_value' from src tablesample (2 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00.0/region=1 -POSTHOOK: query: -- insert overwrite -insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) +POSTHOOK: query: insert overwrite table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) select 'changed_key', 'changed_value' from src tablesample (2 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -121,12 +113,10 @@ POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00.0/reg changed_key changed_value 2000-01-01 00:00:00 1 changed_key changed_value 2000-01-01 00:00:00 1 238 val_238 2000-01-01 01:00:00 1 -PREHOOK: query: -- truncate -truncate table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) +PREHOOK: query: truncate table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) PREHOOK: type: TRUNCATETABLE PREHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00.0/region=1 -POSTHOOK: query: -- truncate -truncate table partition_timestamp2_1 
partition(dt=timestamp '2000-01-01 00:00:00', region=1) +POSTHOOK: query: truncate table partition_timestamp2_1 partition(dt=timestamp '2000-01-01 00:00:00', region=1) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Output: default@partition_timestamp2_1@dt=2000-01-01 00%3A00%3A00.0/region=1 PREHOOK: query: select distinct dt from partition_timestamp2_1 @@ -167,12 +157,10 @@ POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00.0/reg 238 val_238 1999-01-01 00:00:00 2 238 val_238 1999-01-01 01:00:00 2 238 val_238 2000-01-01 01:00:00 1 -PREHOOK: query: -- alter table add partition -alter table partition_timestamp2_1 add partition (dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: query: alter table partition_timestamp2_1 add partition (dt=timestamp '1980-01-02 00:00:00', region=3) PREHOOK: type: ALTERTABLE_ADDPARTS PREHOOK: Output: default@partition_timestamp2_1 -POSTHOOK: query: -- alter table add partition -alter table partition_timestamp2_1 add partition (dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: query: alter table partition_timestamp2_1 add partition (dt=timestamp '1980-01-02 00:00:00', region=3) POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: default@partition_timestamp2_1 POSTHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00.0/region=3 @@ -218,13 +206,11 @@ POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00.0/reg 238 val_238 1999-01-01 00:00:00 2 238 val_238 1999-01-01 01:00:00 2 238 val_238 2000-01-01 01:00:00 1 -PREHOOK: query: -- alter table drop -alter table partition_timestamp2_1 drop partition (dt=timestamp '1999-01-01 01:00:00', region=2) +PREHOOK: query: alter table partition_timestamp2_1 drop partition (dt=timestamp '1999-01-01 01:00:00', region=2) PREHOOK: type: ALTERTABLE_DROPPARTS PREHOOK: Input: default@partition_timestamp2_1 PREHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00.0/region=2 -POSTHOOK: query: -- alter table drop -alter table 
partition_timestamp2_1 drop partition (dt=timestamp '1999-01-01 01:00:00', region=2) +POSTHOOK: query: alter table partition_timestamp2_1 drop partition (dt=timestamp '1999-01-01 01:00:00', region=2) POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@partition_timestamp2_1 POSTHOOK: Output: default@partition_timestamp2_1@dt=1999-01-01 01%3A00%3A00.0/region=2 @@ -264,27 +250,23 @@ POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00.0/reg #### A masked pattern was here #### 238 val_238 1999-01-01 00:00:00 2 238 val_238 2000-01-01 01:00:00 1 -PREHOOK: query: -- alter table set serde -alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: query: alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' PREHOOK: type: ALTERPARTITION_SERIALIZER PREHOOK: Input: default@partition_timestamp2_1 PREHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00.0/region=3 -POSTHOOK: query: -- alter table set serde -alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: query: alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) set serde 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' POSTHOOK: type: ALTERPARTITION_SERIALIZER POSTHOOK: Input: default@partition_timestamp2_1 POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00.0/region=3 POSTHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00.0/region=3 -PREHOOK: query: -- alter table set fileformat -alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: query: alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) set fileformat rcfile PREHOOK: type: ALTERPARTITION_FILEFORMAT PREHOOK: Input: default@partition_timestamp2_1 PREHOOK: 
Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00.0/region=3 -POSTHOOK: query: -- alter table set fileformat -alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: query: alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) set fileformat rcfile POSTHOOK: type: ALTERPARTITION_FILEFORMAT POSTHOOK: Input: default@partition_timestamp2_1 @@ -340,15 +322,13 @@ POSTHOOK: Input: default@partition_timestamp2_1@dt=2000-01-01 01%3A00%3A00.0/reg 238 val_238 1999-01-01 00:00:00 2 238 val_238 2000-01-01 01:00:00 1 86 val_86 1980-01-02 00:00:00 3 -PREHOOK: query: -- alter table set location -alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: query: alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) #### A masked pattern was here #### PREHOOK: type: ALTERPARTITION_LOCATION PREHOOK: Input: default@partition_timestamp2_1 PREHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00.0/region=3 #### A masked pattern was here #### -POSTHOOK: query: -- alter table set location -alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: query: alter table partition_timestamp2_1 partition(dt=timestamp '1980-01-02 00:00:00', region=3) #### A masked pattern was here #### POSTHOOK: type: ALTERPARTITION_LOCATION POSTHOOK: Input: default@partition_timestamp2_1 @@ -373,13 +353,11 @@ dt timestamp region int #### A masked pattern was here #### -PREHOOK: query: -- alter table touch -alter table partition_timestamp2_1 touch partition(dt=timestamp '1980-01-02 00:00:00', region=3) +PREHOOK: query: alter table partition_timestamp2_1 touch partition(dt=timestamp '1980-01-02 00:00:00', region=3) PREHOOK: type: ALTERTABLE_TOUCH PREHOOK: Input: default@partition_timestamp2_1 PREHOOK: Output: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00.0/region=3 
-POSTHOOK: query: -- alter table touch -alter table partition_timestamp2_1 touch partition(dt=timestamp '1980-01-02 00:00:00', region=3) +POSTHOOK: query: alter table partition_timestamp2_1 touch partition(dt=timestamp '1980-01-02 00:00:00', region=3) POSTHOOK: type: ALTERTABLE_TOUCH POSTHOOK: Input: default@partition_timestamp2_1 POSTHOOK: Input: default@partition_timestamp2_1@dt=1980-01-02 00%3A00%3A00.0/region=3 diff --git a/ql/src/test/results/clientpositive/partition_type_check.q.out b/ql/src/test/results/clientpositive/partition_type_check.q.out index e25d527..c28fb0c 100644 --- a/ql/src/test/results/clientpositive/partition_type_check.q.out +++ b/ql/src/test/results/clientpositive/partition_type_check.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- begin part(string, string) pass(string, int) -CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile +PREHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tab1 -POSTHOOK: query: -- begin part(string, string) pass(string, int) -CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile +POSTHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab1 @@ -41,13 +39,11 @@ POSTHOOK: query: drop table tab1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@tab1 POSTHOOK: Output: default@tab1 -PREHOOK: query: -- begin part(string, int) pass(string, string) -CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile +PREHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tab1 
-POSTHOOK: query: -- begin part(string, int) pass(string, string) -CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile +POSTHOOK: query: CREATE TABLE tab1 (id1 int,id2 string) PARTITIONED BY(month string,day int) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab1 @@ -84,13 +80,11 @@ POSTHOOK: query: drop table tab1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@tab1 POSTHOOK: Output: default@tab1 -PREHOOK: query: -- begin part(string, date) pass(string, date) -create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile +PREHOOK: query: create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tab1 -POSTHOOK: query: -- begin part(string, date) pass(string, date) -create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile +POSTHOOK: query: create table tab1 (id1 int, id2 string) PARTITIONED BY(month string,day date) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tab1 diff --git a/ql/src/test/results/clientpositive/partition_type_in_plan.q.out b/ql/src/test/results/clientpositive/partition_type_in_plan.q.out index 58b8e0c..6082c29 100644 --- a/ql/src/test/results/clientpositive/partition_type_in_plan.q.out +++ b/ql/src/test/results/clientpositive/partition_type_in_plan.q.out @@ -1,23 +1,17 @@ -PREHOOK: query: -- Test partition column type is considered as the type given in table def --- and not as 'string' -CREATE TABLE datePartTbl(col1 string) PARTITIONED BY (date_prt date) +PREHOOK: query: CREATE TABLE datePartTbl(col1 string) PARTITIONED BY (date_prt date) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@datePartTbl -POSTHOOK: query: -- Test partition column type is 
considered as the type given in table def --- and not as 'string' -CREATE TABLE datePartTbl(col1 string) PARTITIONED BY (date_prt date) +POSTHOOK: query: CREATE TABLE datePartTbl(col1 string) PARTITIONED BY (date_prt date) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@datePartTbl -PREHOOK: query: -- Add test partitions and some sample data -INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-09') +PREHOOK: query: INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-09') SELECT 'col1-2014-08-09' FROM src LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@dateparttbl@date_prt=2014-08-09 -POSTHOOK: query: -- Add test partitions and some sample data -INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-09') +POSTHOOK: query: INSERT OVERWRITE TABLE datePartTbl PARTITION(date_prt='2014-08-09') SELECT 'col1-2014-08-09' FROM src LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -34,14 +28,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@dateparttbl@date_prt=2014-08-10 POSTHOOK: Lineage: dateparttbl PARTITION(date_prt=2014-08-10).col1 SIMPLE [] -PREHOOK: query: -- Query where 'date_prt' value is restricted to given values in IN operator. -SELECT * FROM datePartTbl WHERE date_prt IN (CAST('2014-08-09' AS DATE), CAST('2014-08-08' AS DATE)) +PREHOOK: query: SELECT * FROM datePartTbl WHERE date_prt IN (CAST('2014-08-09' AS DATE), CAST('2014-08-08' AS DATE)) PREHOOK: type: QUERY PREHOOK: Input: default@dateparttbl PREHOOK: Input: default@dateparttbl@date_prt=2014-08-09 #### A masked pattern was here #### -POSTHOOK: query: -- Query where 'date_prt' value is restricted to given values in IN operator. 
-SELECT * FROM datePartTbl WHERE date_prt IN (CAST('2014-08-09' AS DATE), CAST('2014-08-08' AS DATE)) +POSTHOOK: query: SELECT * FROM datePartTbl WHERE date_prt IN (CAST('2014-08-09' AS DATE), CAST('2014-08-08' AS DATE)) POSTHOOK: type: QUERY POSTHOOK: Input: default@dateparttbl POSTHOOK: Input: default@dateparttbl@date_prt=2014-08-09 diff --git a/ql/src/test/results/clientpositive/partition_varchar1.q.out b/ql/src/test/results/clientpositive/partition_varchar1.q.out index e6e770a..93c9adf 100644 --- a/ql/src/test/results/clientpositive/partition_varchar1.q.out +++ b/ql/src/test/results/clientpositive/partition_varchar1.q.out @@ -91,127 +91,101 @@ POSTHOOK: Input: default@partition_varchar_1@dt=2000-01-01/region=2 27 val_27 2000-01-01 2 311 val_311 2000-01-01 2 86 val_86 2000-01-01 2 -PREHOOK: query: -- 15 -select count(*) from partition_varchar_1 where dt = '2000-01-01' +PREHOOK: query: select count(*) from partition_varchar_1 where dt = '2000-01-01' PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 15 -select count(*) from partition_varchar_1 where dt = '2000-01-01' +POSTHOOK: query: select count(*) from partition_varchar_1 where dt = '2000-01-01' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 15 -PREHOOK: query: -- 5 -select count(*) from partition_varchar_1 where dt = '2000-01-01' and region = 2 +PREHOOK: query: select count(*) from partition_varchar_1 where dt = '2000-01-01' and region = 2 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 5 -select count(*) from partition_varchar_1 where dt = '2000-01-01' and region = 2 +POSTHOOK: query: select count(*) from partition_varchar_1 where dt = '2000-01-01' and region = 2 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 5 -PREHOOK: query: -- 11 -select count(*) from 
partition_varchar_1 where dt = '2013-08-08' and region = 10 +PREHOOK: query: select count(*) from partition_varchar_1 where dt = '2013-08-08' and region = 10 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 11 -select count(*) from partition_varchar_1 where dt = '2013-08-08' and region = 10 +POSTHOOK: query: select count(*) from partition_varchar_1 where dt = '2013-08-08' and region = 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 11 -PREHOOK: query: -- 30 -select count(*) from partition_varchar_1 where region = 1 +PREHOOK: query: select count(*) from partition_varchar_1 where region = 1 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 30 -select count(*) from partition_varchar_1 where region = 1 +POSTHOOK: query: select count(*) from partition_varchar_1 where region = 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 30 -PREHOOK: query: -- 0 -select count(*) from partition_varchar_1 where dt = '2000-01-01' and region = 3 +PREHOOK: query: select count(*) from partition_varchar_1 where dt = '2000-01-01' and region = 3 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 0 -select count(*) from partition_varchar_1 where dt = '2000-01-01' and region = 3 +POSTHOOK: query: select count(*) from partition_varchar_1 where dt = '2000-01-01' and region = 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 0 -PREHOOK: query: -- 0 -select count(*) from partition_varchar_1 where dt = '1999-01-01' +PREHOOK: query: select count(*) from partition_varchar_1 where dt = '1999-01-01' PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 0 
-select count(*) from partition_varchar_1 where dt = '1999-01-01' +POSTHOOK: query: select count(*) from partition_varchar_1 where dt = '1999-01-01' POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 0 -PREHOOK: query: -- Try other comparison operations - --- 20 -select count(*) from partition_varchar_1 where dt > '2000-01-01' and region = 1 +PREHOOK: query: select count(*) from partition_varchar_1 where dt > '2000-01-01' and region = 1 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- Try other comparison operations - --- 20 -select count(*) from partition_varchar_1 where dt > '2000-01-01' and region = 1 +POSTHOOK: query: select count(*) from partition_varchar_1 where dt > '2000-01-01' and region = 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 20 -PREHOOK: query: -- 10 -select count(*) from partition_varchar_1 where dt < '2000-01-02' and region = 1 +PREHOOK: query: select count(*) from partition_varchar_1 where dt < '2000-01-02' and region = 1 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_varchar_1 where dt < '2000-01-02' and region = 1 +POSTHOOK: query: select count(*) from partition_varchar_1 where dt < '2000-01-02' and region = 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- 20 -select count(*) from partition_varchar_1 where dt >= '2000-01-02' and region = 1 +PREHOOK: query: select count(*) from partition_varchar_1 where dt >= '2000-01-02' and region = 1 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 20 -select count(*) from partition_varchar_1 where dt >= '2000-01-02' and region = 1 +POSTHOOK: query: select count(*) 
from partition_varchar_1 where dt >= '2000-01-02' and region = 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 20 -PREHOOK: query: -- 10 -select count(*) from partition_varchar_1 where dt <= '2000-01-01' and region = 1 +PREHOOK: query: select count(*) from partition_varchar_1 where dt <= '2000-01-01' and region = 1 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 10 -select count(*) from partition_varchar_1 where dt <= '2000-01-01' and region = 1 +POSTHOOK: query: select count(*) from partition_varchar_1 where dt <= '2000-01-01' and region = 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### 10 -PREHOOK: query: -- 20 -select count(*) from partition_varchar_1 where dt <> '2000-01-01' and region = 1 +PREHOOK: query: select count(*) from partition_varchar_1 where dt <> '2000-01-01' and region = 1 PREHOOK: type: QUERY PREHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### -POSTHOOK: query: -- 20 -select count(*) from partition_varchar_1 where dt <> '2000-01-01' and region = 1 +POSTHOOK: query: select count(*) from partition_varchar_1 where dt <> '2000-01-01' and region = 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@partition_varchar_1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out index 171193e..9faa94f 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat10.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- This tests that the schema can be changed for binary serde data -create table prt(key string, value string) partitioned by (dt string) +PREHOOK: query: create table prt(key string, value string) partitioned by (dt string) PREHOOK: 
type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@prt -POSTHOOK: query: -- This tests that the schema can be changed for binary serde data -create table prt(key string, value string) partitioned by (dt string) +POSTHOOK: query: create table prt(key string, value string) partitioned by (dt string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@prt diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out index 0b39b77..d54fd0a 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat11.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- This tests that the schema can be changed for binary serde data -create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: -- This tests that the schema can be changed for binary serde data -create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partition_test_partitioned diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out index 2bf75a7..4ec48ad 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat12.q.out @@ -1,10 +1,8 @@ 
-PREHOOK: query: -- This tests that the schema can be changed for binary serde data -create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: -- This tests that the schema can be changed for binary serde data -create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partition_test_partitioned diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out index da6881c..d7a0fad 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat13.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- This tests that the schema can be changed for partitioned tables for binary serde data for joins -create table T1(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: query: create table T1(key string, value string) partitioned by (dt string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- This tests that the schema can be changed for partitioned tables for binary serde data for joins -create table T1(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: query: create table T1(key string, value string) partitioned by (dt string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out index 3e4e7c3..d640e74 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat14.q.out @@ -80,8 +80,7 @@ POSTHOOK: query: alter table tbl1 change key key string POSTHOOK: type: ALTERTABLE_RENAMECOL POSTHOOK: Input: default@tbl1 POSTHOOK: Output: default@tbl1 -PREHOOK: query: -- The subquery itself is being map-joined. Multiple partitions of tbl1 with different schemas are being read for tbl2 -select /*+mapjoin(subq1)*/ count(*) from +PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 @@ -93,8 +92,7 @@ PREHOOK: Input: default@tbl1@ds=2 PREHOOK: Input: default@tbl2 PREHOOK: Input: default@tbl2@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- The subquery itself is being map-joined. Multiple partitions of tbl1 with different schemas are being read for tbl2 -select /*+mapjoin(subq1)*/ count(*) from +POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 @@ -107,10 +105,7 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl2@ds=1 #### A masked pattern was here #### 40 -PREHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should --- be converted to a bucketized mapside join. 
Multiple partitions of tbl1 with different schemas are being read for each --- bucket of tbl2 -select /*+mapjoin(subq1)*/ count(*) from +PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 @@ -122,10 +117,7 @@ PREHOOK: Input: default@tbl1@ds=2 PREHOOK: Input: default@tbl2 PREHOOK: Input: default@tbl2@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should --- be converted to a bucketized mapside join. Multiple partitions of tbl1 with different schemas are being read for each --- bucket of tbl2 -select /*+mapjoin(subq1)*/ count(*) from +POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 @@ -138,10 +130,7 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl2@ds=1 #### A masked pattern was here #### 40 -PREHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. Multiple partitions of tbl1 with different schemas are being read for a --- given file of tbl2 -select /*+mapjoin(subq1)*/ count(*) from +PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 @@ -153,10 +142,7 @@ PREHOOK: Input: default@tbl1@ds=2 PREHOOK: Input: default@tbl2 PREHOOK: Input: default@tbl2@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. 
Multiple partitions of tbl1 with different schemas are being read for a --- given file of tbl2 -select /*+mapjoin(subq1)*/ count(*) from +POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 @@ -169,9 +155,7 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl2@ds=1 #### A masked pattern was here #### 40 -PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed. Multiple partitions of tbl1 with different schemas are being read for tbl2 -select /*+mapjoin(subq1)*/ count(*) from +PREHOOK: query: select /*+mapjoin(subq1)*/ count(*) from (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 @@ -183,9 +167,7 @@ PREHOOK: Input: default@tbl1@ds=2 PREHOOK: Input: default@tbl2 PREHOOK: Input: default@tbl2@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed. 
Multiple partitions of tbl1 with different schemas are being read for tbl2 -select /*+mapjoin(subq1)*/ count(*) from +POSTHOOK: query: select /*+mapjoin(subq1)*/ count(*) from (select a.key+1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join (select a.key+1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat15.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat15.q.out index 16d259e..c992d37 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat15.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat15.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- This tests that the schema can be changed for binary serde data -create table partition_test_partitioned(key string, value string) +PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: -- This tests that the schema can be changed for binary serde data -create table partition_test_partitioned(key string, value string) +POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat16.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat16.q.out index c1bcb1a..906f53c 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat16.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat16.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- This tests that the schema can be changed for binary serde data -create table partition_test_partitioned(key string, value string) +PREHOOK: query: create table partition_test_partitioned(key string, value 
string) partitioned by (dt string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: -- This tests that the schema can be changed for binary serde data -create table partition_test_partitioned(key string, value string) +POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat17.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat17.q.out index 028a26e..3cb6e52 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat17.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat17.q.out @@ -1,14 +1,6 @@ -PREHOOK: query: -- HIVE-5199, HIVE-5285 : CustomSerDe(1, 2, 3) are used here. --- The final results should be all NULL columns deserialized using --- CustomSerDe(1, 2, 3) irrespective of the inserted values - -DROP TABLE PW17 +PREHOOK: query: DROP TABLE PW17 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- HIVE-5199, HIVE-5285 : CustomSerDe(1, 2, 3) are used here. 
--- The final results should be all NULL columns deserialized using --- CustomSerDe(1, 2, 3) irrespective of the inserted values - -DROP TABLE PW17 +POSTHOOK: query: DROP TABLE PW17 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE PW17(`USER` STRING, COMPLEXDT ARRAY) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' PREHOOK: type: CREATETABLE @@ -44,14 +36,12 @@ POSTHOOK: query: ALTER TABLE PW17 SET SERDE 'org.apache.hadoop.hive.serde2.Custo POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: default@pw17 POSTHOOK: Output: default@pw17 -PREHOOK: query: -- Without the fix HIVE-5199, will throw cast exception via FetchOperator -SELECT * FROM PW17 +PREHOOK: query: SELECT * FROM PW17 PREHOOK: type: QUERY PREHOOK: Input: default@pw17 PREHOOK: Input: default@pw17@year=1 #### A masked pattern was here #### -POSTHOOK: query: -- Without the fix HIVE-5199, will throw cast exception via FetchOperator -SELECT * FROM PW17 +POSTHOOK: query: SELECT * FROM PW17 POSTHOOK: type: QUERY POSTHOOK: Input: default@pw17 POSTHOOK: Input: default@pw17@year=1 @@ -60,11 +50,9 @@ NULL NULL 1 NULL NULL 1 NULL NULL 1 NULL NULL 1 -PREHOOK: query: -- Test for non-parititioned table. -DROP TABLE PW17_2 +PREHOOK: query: DROP TABLE PW17_2 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Test for non-parititioned table. 
-DROP TABLE PW17_2 +POSTHOOK: query: DROP TABLE PW17_2 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE PW17_2(`USER` STRING, COMPLEXDT ARRAY) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe1' PREHOOK: type: CREATETABLE @@ -82,13 +70,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE P POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@pw17_2 -PREHOOK: query: -- Without the fix HIVE-5199, will throw cast exception via MapOperator -SELECT COUNT(*) FROM PW17_2 +PREHOOK: query: SELECT COUNT(*) FROM PW17_2 PREHOOK: type: QUERY PREHOOK: Input: default@pw17_2 #### A masked pattern was here #### -POSTHOOK: query: -- Without the fix HIVE-5199, will throw cast exception via MapOperator -SELECT COUNT(*) FROM PW17_2 +POSTHOOK: query: SELECT COUNT(*) FROM PW17_2 POSTHOOK: type: QUERY POSTHOOK: Input: default@pw17_2 #### A masked pattern was here #### @@ -131,14 +117,12 @@ POSTHOOK: query: ALTER TABLE PW17_3 SET SERDE 'org.apache.hadoop.hive.serde2.Cus POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: default@pw17_3 POSTHOOK: Output: default@pw17_3 -PREHOOK: query: -- Without the fix HIVE-5285, will throw cast exception via FetchOperator -SELECT * FROM PW17 +PREHOOK: query: SELECT * FROM PW17 PREHOOK: type: QUERY PREHOOK: Input: default@pw17 PREHOOK: Input: default@pw17@year=1 #### A masked pattern was here #### -POSTHOOK: query: -- Without the fix HIVE-5285, will throw cast exception via FetchOperator -SELECT * FROM PW17 +POSTHOOK: query: SELECT * FROM PW17 POSTHOOK: type: QUERY POSTHOOK: Input: default@pw17 POSTHOOK: Input: default@pw17@year=1 @@ -167,13 +151,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE P POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@pw17_4 -PREHOOK: query: -- Without the fix HIVE-5285, will throw cast exception via MapOperator -SELECT COUNT(*) FROM PW17_4 +PREHOOK: query: SELECT COUNT(*) FROM 
PW17_4 PREHOOK: type: QUERY PREHOOK: Input: default@pw17_4 #### A masked pattern was here #### -POSTHOOK: query: -- Without the fix HIVE-5285, will throw cast exception via MapOperator -SELECT COUNT(*) FROM PW17_4 +POSTHOOK: query: SELECT COUNT(*) FROM PW17_4 POSTHOOK: type: QUERY POSTHOOK: Input: default@pw17_4 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat18.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat18.q.out index 6303d44..0fa59b6 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat18.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat18.q.out @@ -1,16 +1,6 @@ -PREHOOK: query: -- HIVE-5202 : Tests for SettableUnionObjectInspectors --- CustomSerDe(4,5) are used here. --- The final results should be all NULL columns deserialized using --- CustomSerDe(4, 5) irrespective of the inserted values - -DROP TABLE PW18 +PREHOOK: query: DROP TABLE PW18 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- HIVE-5202 : Tests for SettableUnionObjectInspectors --- CustomSerDe(4,5) are used here. --- The final results should be all NULL columns deserialized using --- CustomSerDe(4, 5) irrespective of the inserted values - -DROP TABLE PW18 +POSTHOOK: query: DROP TABLE PW18 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE PW18(`USER` STRING, COMPLEXDT UNIONTYPE) PARTITIONED BY (YEAR STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' PREHOOK: type: CREATETABLE @@ -38,14 +28,12 @@ POSTHOOK: type: ALTERPARTITION_SERIALIZER POSTHOOK: Input: default@pw18 POSTHOOK: Input: default@pw18@year=1 POSTHOOK: Output: default@pw18@year=1 -PREHOOK: query: -- Without the fix HIVE-5202, will throw unsupported data type exception. 
-SELECT * FROM PW18 +PREHOOK: query: SELECT * FROM PW18 PREHOOK: type: QUERY PREHOOK: Input: default@pw18 PREHOOK: Input: default@pw18@year=1 #### A masked pattern was here #### -POSTHOOK: query: -- Without the fix HIVE-5202, will throw unsupported data type exception. -SELECT * FROM PW18 +POSTHOOK: query: SELECT * FROM PW18 POSTHOOK: type: QUERY POSTHOOK: Input: default@pw18 POSTHOOK: Input: default@pw18@year=1 @@ -54,11 +42,9 @@ NULL NULL 1 NULL NULL 1 NULL NULL 1 NULL NULL 1 -PREHOOK: query: -- Test for non-parititioned table. -DROP TABLE PW18_2 +PREHOOK: query: DROP TABLE PW18_2 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Test for non-parititioned table. -DROP TABLE PW18_2 +POSTHOOK: query: DROP TABLE PW18_2 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE PW18_2(`USER` STRING, COMPLEXDT UNIONTYPE) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.CustomSerDe5' PREHOOK: type: CREATETABLE @@ -76,13 +62,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/pw17.txt' INTO TABLE P POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@pw18_2 -PREHOOK: query: -- Without the fix HIVE-5202, will throw unsupported data type exception -SELECT COUNT(*) FROM PW18_2 +PREHOOK: query: SELECT COUNT(*) FROM PW18_2 PREHOOK: type: QUERY PREHOOK: Input: default@pw18_2 #### A masked pattern was here #### -POSTHOOK: query: -- Without the fix HIVE-5202, will throw unsupported data type exception -SELECT COUNT(*) FROM PW18_2 +POSTHOOK: query: SELECT COUNT(*) FROM PW18_2 POSTHOOK: type: QUERY POSTHOOK: Input: default@pw18_2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out index c8abc9e..de239ca 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - 
-create table partition_test_partitioned(key string, value string) partitioned by (dt string) +PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: -- SORT_BEFORE_DIFF - -create table partition_test_partitioned(key string, value string) partitioned by (dt string) +POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partition_test_partitioned diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out index a7b1123..d64649b 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat8.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- This tests that a query can span multiple partitions which can not only have different file formats, but --- also different serdes -create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: -- This tests that a query can span multiple partitions which can not only have different file formats, but --- also different serdes -create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@partition_test_partitioned diff --git a/ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out b/ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out index 706d647..d2f0c78 100644 --- a/ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out +++ b/ql/src/test/results/clientpositive/partition_wise_fileformat9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- This tests that a query can span multiple partitions which can not only have different file formats, but --- also different serdes -create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +PREHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@partition_test_partitioned -POSTHOOK: query: -- This tests that a query can span multiple partitions which can not only have different file formats, but --- also different serdes -create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile +POSTHOOK: query: create table partition_test_partitioned(key string, value string) partitioned by (dt string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@partition_test_partitioned diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out index 2679aa7..3a86e46 100644 --- a/ql/src/test/results/clientpositive/pcr.q.out +++ b/ql/src/test/results/clientpositive/pcr.q.out @@ -5094,11 +5094,9 @@ POSTHOOK: query: drop table pcr_t3 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@pcr_t3 POSTHOOK: Output: default@pcr_t3 -PREHOOK: query: -- Test cases when a non-boolean ds expression has same and different values for all possible ds values: -drop table pcr_foo +PREHOOK: query: drop table pcr_foo PREHOOK: type: DROPTABLE -POSTHOOK: query: -- 
Test cases when a non-boolean ds expression has same and different values for all possible ds values: -drop table pcr_foo +POSTHOOK: query: drop table pcr_foo POSTHOOK: type: DROPTABLE PREHOOK: query: create table pcr_foo (key int, value string) partitioned by (ds int) PREHOOK: type: CREATETABLE @@ -5138,16 +5136,14 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@pcr_foo@ds=7 POSTHOOK: Lineage: pcr_foo PARTITION(ds=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: pcr_foo PARTITION(ds=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- the condition is 'true' for all the 3 partitions (ds=3,5,7): -select key, value, ds from pcr_foo where (ds % 2 == 1) +PREHOOK: query: select key, value, ds from pcr_foo where (ds % 2 == 1) PREHOOK: type: QUERY PREHOOK: Input: default@pcr_foo PREHOOK: Input: default@pcr_foo@ds=3 PREHOOK: Input: default@pcr_foo@ds=5 PREHOOK: Input: default@pcr_foo@ds=7 #### A masked pattern was here #### -POSTHOOK: query: -- the condition is 'true' for all the 3 partitions (ds=3,5,7): -select key, value, ds from pcr_foo where (ds % 2 == 1) +POSTHOOK: query: select key, value, ds from pcr_foo where (ds % 2 == 1) POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_foo POSTHOOK: Input: default@pcr_foo@ds=3 @@ -5184,15 +5180,13 @@ POSTHOOK: Input: default@pcr_foo@ds=7 5 val_5 7 8 val_8 7 9 val_9 7 -PREHOOK: query: -- the condition is 'true' for partitions (ds=3,5) but 'false' of partition ds=7: -select key, value, ds from pcr_foo where (ds / 3 < 2) +PREHOOK: query: select key, value, ds from pcr_foo where (ds / 3 < 2) PREHOOK: type: QUERY PREHOOK: Input: default@pcr_foo PREHOOK: Input: default@pcr_foo@ds=3 PREHOOK: Input: default@pcr_foo@ds=5 #### A masked pattern was here #### -POSTHOOK: query: -- the condition is 'true' for partitions (ds=3,5) but 'false' of partition ds=7: -select key, value, ds from pcr_foo where (ds / 3 < 2) +POSTHOOK: 
query: select key, value, ds from pcr_foo where (ds / 3 < 2) POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_foo POSTHOOK: Input: default@pcr_foo@ds=3 @@ -5226,18 +5220,14 @@ POSTHOOK: query: drop table pcr_foo POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@pcr_foo POSTHOOK: Output: default@pcr_foo -PREHOOK: query: -- Cover org.apache.hadoop.hive.ql.optimizer.pcr.PcrExprProcFactory.FieldExprProcessor. --- Create a table with a struct data: -create table ab(strct struct) +PREHOOK: query: create table ab(strct struct) row format delimited fields terminated by '\t' collection items terminated by '\001' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ab -POSTHOOK: query: -- Cover org.apache.hadoop.hive.ql.optimizer.pcr.PcrExprProcFactory.FieldExprProcessor. --- Create a table with a struct data: -create table ab(strct struct) +POSTHOOK: query: create table ab(strct struct) row format delimited fields terminated by '\t' collection items terminated by '\001' @@ -5254,11 +5244,9 @@ overwrite into table ab POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@ab -PREHOOK: query: -- Create partitioned table with struct data: -drop table foo_field +PREHOOK: query: drop table foo_field PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Create partitioned table with struct data: -drop table foo_field +POSTHOOK: query: drop table foo_field POSTHOOK: type: DROPTABLE PREHOOK: query: create table foo_field (s struct) partitioned by (ds int) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/plan_json.q.out b/ql/src/test/results/clientpositive/plan_json.q.out index e28c239..75d5b73 100644 --- a/ql/src/test/results/clientpositive/plan_json.q.out +++ b/ql/src/test/results/clientpositive/plan_json.q.out @@ -1,11 +1,5 @@ -PREHOOK: query: -- explain plan json: the query gets the formatted json output of the query plan of the hive query - - -EXPLAIN FORMATTED SELECT count(1) FROM src +PREHOOK: 
query: EXPLAIN FORMATTED SELECT count(1) FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- explain plan json: the query gets the formatted json output of the query plan of the hive query - - -EXPLAIN FORMATTED SELECT count(1) FROM src +POSTHOOK: query: EXPLAIN FORMATTED SELECT count(1) FROM src POSTHOOK: type: QUERY {"STAGE DEPENDENCIES":{"Stage-0":{"ROOT STAGE":"TRUE"}},"STAGE PLANS":{"Stage-0":{"Fetch Operator":{"limit:":"1","Processor Tree:":{"ListSink":{}}}}}} diff --git a/ql/src/test/results/clientpositive/ppd_constant_where.q.out b/ql/src/test/results/clientpositive/ppd_constant_where.q.out index b268300..ee6cc08 100644 --- a/ql/src/test/results/clientpositive/ppd_constant_where.q.out +++ b/ql/src/test/results/clientpositive/ppd_constant_where.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- Test that the partition pruner does not fail when there is a constant expression in the filter - -EXPLAIN SELECT COUNT(*) FROM srcpart WHERE ds = '2008-04-08' and 'a' = 'a' +PREHOOK: query: EXPLAIN SELECT COUNT(*) FROM srcpart WHERE ds = '2008-04-08' and 'a' = 'a' PREHOOK: type: QUERY -POSTHOOK: query: -- Test that the partition pruner does not fail when there is a constant expression in the filter - -EXPLAIN SELECT COUNT(*) FROM srcpart WHERE ds = '2008-04-08' and 'a' = 'a' +POSTHOOK: query: EXPLAIN SELECT COUNT(*) FROM srcpart WHERE ds = '2008-04-08' and 'a' = 'a' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/ppd_field_garbage.q.out b/ql/src/test/results/clientpositive/ppd_field_garbage.q.out index bf16dd7..0dac980 100644 --- a/ql/src/test/results/clientpositive/ppd_field_garbage.q.out +++ b/ql/src/test/results/clientpositive/ppd_field_garbage.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- ppd leaves invalid expr in field expr -CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>) +PREHOOK: query: CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>) PREHOOK: type: CREATETABLE PREHOOK: 
Output: database:default PREHOOK: Output: default@test_issue -POSTHOOK: query: -- ppd leaves invalid expr in field expr -CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>) +POSTHOOK: query: CREATE TABLE test_issue (fileid int, infos ARRAY>, test_c STRUCT>) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_issue @@ -18,13 +16,11 @@ POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@test_issue POSTHOOK: Output: database:default POSTHOOK: Output: default@v_test_issue -PREHOOK: query: -- dummy data -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_issue +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_issue PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@test_issue -POSTHOOK: query: -- dummy data -LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_issue +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv1.txt' INTO TABLE test_issue POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@test_issue diff --git a/ql/src/test/results/clientpositive/ppd_gby_join.q.out b/ql/src/test/results/clientpositive/ppd_gby_join.q.out index 1894b04..a160410 100644 --- a/ql/src/test/results/clientpositive/ppd_gby_join.q.out +++ b/ql/src/test/results/clientpositive/ppd_gby_join.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, count(1) FROM (SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1 @@ -10,9 +8,7 @@ ON src1.c1 = src2.c3 AND src1.c1 < '400' WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2') AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4') GROUP BY src1.c1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, count(1) FROM (SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1 diff --git 
a/ql/src/test/results/clientpositive/ppd_join.q.out b/ql/src/test/results/clientpositive/ppd_join.q.out index 6081d48..e48c5e2 100644 --- a/ql/src/test/results/clientpositive/ppd_join.q.out +++ b/ql/src/test/results/clientpositive/ppd_join.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1 @@ -9,9 +7,7 @@ JOIN ON src1.c1 = src2.c3 AND src1.c1 < '400' WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4') PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1 diff --git a/ql/src/test/results/clientpositive/ppd_join2.q.out b/ql/src/test/results/clientpositive/ppd_join2.q.out index 729383a..d5f5016 100644 --- a/ql/src/test/results/clientpositive/ppd_join2.q.out +++ b/ql/src/test/results/clientpositive/ppd_join2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key <> '302' ) src1 @@ -12,9 +10,7 @@ JOIN ON src1.c2 = src3.c6 WHERE src1.c1 <> '311' and (src1.c2 <> 'val_50' or src1.c1 > '1') and (src2.c3 <> '10' or src1.c1 <> '10') and (src2.c3 <> '14') and (sqrt(src3.c5) <> 13) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key <> '302' ) src1 diff --git a/ql/src/test/results/clientpositive/ppd_join3.q.out b/ql/src/test/results/clientpositive/ppd_join3.q.out index d50bf49..3a8c1d7 100644 --- a/ql/src/test/results/clientpositive/ppd_join3.q.out +++ b/ql/src/test/results/clientpositive/ppd_join3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 
SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key <> '11' ) src1 @@ -12,9 +10,7 @@ JOIN ON src1.c1 = src3.c5 WHERE src1.c1 > '0' and (src1.c2 <> 'val_500' or src1.c1 > '1') and (src2.c3 > '10' or src1.c1 <> '10') and (src2.c3 <> '4') and (src3.c5 <> '1') PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key <> '11' ) src1 diff --git a/ql/src/test/results/clientpositive/ppd_multi_insert.q.out b/ql/src/test/results/clientpositive/ppd_multi_insert.q.out index 1a7019c..7e501c7 100644 --- a/ql/src/test/results/clientpositive/ppd_multi_insert.q.out +++ b/ql/src/test/results/clientpositive/ppd_multi_insert.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE mi1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE mi1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@mi1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE mi1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE mi1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@mi1 diff --git a/ql/src/test/results/clientpositive/ppd_outer_join1.q.out b/ql/src/test/results/clientpositive/ppd_outer_join1.q.out index fe64e32..8ec267c 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join1.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join1.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src a LEFT OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN 
+POSTHOOK: query: EXPLAIN FROM src a LEFT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/ppd_outer_join2.q.out b/ql/src/test/results/clientpositive/ppd_outer_join2.q.out index 30ce9ad..8ce86d1 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join2.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src a RIGHT OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN SELECT a.key, a.value, b.key, b.value WHERE a.key > '10' AND a.key < '20' AND b.key > '15' AND b.key < '25' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src a RIGHT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/ppd_outer_join3.q.out b/ql/src/test/results/clientpositive/ppd_outer_join3.q.out index 4688751..b8b6168 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join3.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src a FULL OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN SELECT a.key, a.value, b.key, b.value WHERE a.key > '10' AND a.key < '20' AND b.key > '15' AND b.key < '25' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src a FULL OUTER JOIN diff --git a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out index 8ce6b88..e8e9b55 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src a LEFT OUTER JOIN @@ -12,9 +10,7 @@ EXPLAIN SELECT a.key, a.value, b.key, b.value, c.key WHERE a.key > '10' AND a.key < '20' AND b.key > '15' AND b.key < '25' AND sqrt(c.key) <> 13 PREHOOK: type: QUERY -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src a LEFT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out index 6f2937a..738424b 100644 --- a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out +++ b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out @@ -22,15 +22,13 @@ POSTHOOK: query: create table pokes2 (foo int, bar int, blah int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@pokes2 -PREHOOK: query: -- Q1: predicate should not be pushed on the right side of a left outer join -explain +PREHOOK: query: explain SELECT a.foo as foo1, b.foo as foo2, b.bar FROM pokes a LEFT OUTER JOIN pokes2 b ON a.foo=b.foo WHERE b.bar=3 PREHOOK: type: QUERY -POSTHOOK: query: -- Q1: predicate should not be pushed on the right side of a left outer join -explain +POSTHOOK: query: explain SELECT a.foo as foo1, b.foo as foo2, b.bar FROM pokes a LEFT OUTER JOIN pokes2 b ON a.foo=b.foo @@ -101,16 +99,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Q2: predicate should not be pushed on the right side of a left outer join -explain +PREHOOK: query: explain SELECT * FROM (SELECT a.foo as foo1, b.foo as foo2, b.bar FROM pokes a LEFT OUTER JOIN pokes2 b ON a.foo=b.foo) a WHERE a.bar=3 PREHOOK: type: QUERY -POSTHOOK: query: -- Q2: predicate should not be pushed on the right side of a left outer join -explain +POSTHOOK: query: explain SELECT * FROM (SELECT a.foo as foo1, b.foo as foo2, b.bar FROM pokes a LEFT OUTER JOIN pokes2 b @@ -182,16 +178,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Q3: predicate should be pushed -explain +PREHOOK: query: explain SELECT * FROM (SELECT a.foo as foo1, b.foo as foo2, a.bar FROM pokes a JOIN pokes2 b ON a.foo=b.foo) a WHERE a.bar=3 PREHOOK: type: QUERY -POSTHOOK: query: -- Q3: predicate should be pushed -explain +POSTHOOK: query: explain SELECT * FROM 
(SELECT a.foo as foo1, b.foo as foo2, a.bar FROM pokes a JOIN pokes2 b @@ -264,11 +258,9 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[15][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- Q4: here, the filter c.bar should be created under the first join but above the second -explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes c left outer join pokes b on c.foo=b.foo) c left outer join pokes d where d.foo=1 and c.bar=2 +PREHOOK: query: explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes c left outer join pokes b on c.foo=b.foo) c left outer join pokes d where d.foo=1 and c.bar=2 PREHOOK: type: QUERY -POSTHOOK: query: -- Q4: here, the filter c.bar should be created under the first join but above the second -explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes c left outer join pokes b on c.foo=b.foo) c left outer join pokes d where d.foo=1 and c.bar=2 +POSTHOOK: query: explain select c.foo, d.bar from (select c.foo, b.bar, c.blah from pokes c left outer join pokes b on c.foo=b.foo) c left outer join pokes d where d.foo=1 and c.bar=2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/ppd_transform.q.out b/ql/src/test/results/clientpositive/ppd_transform.q.out index 4fb3dc5..b38088f 100644 --- a/ql/src/test/results/clientpositive/ppd_transform.q.out +++ b/ql/src/test/results/clientpositive/ppd_transform.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM src SELECT TRANSFORM(src.key, src.value) @@ -9,9 +7,7 @@ FROM ( ) tmap SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM src SELECT TRANSFORM(src.key, src.value) @@ -350,17 +346,13 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- test described in 
HIVE-4598 - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM ( SELECT * FROM src ) mapout REDUCE * USING 'cat' AS x,y ) reduced #### A masked pattern was here #### PREHOOK: type: QUERY -POSTHOOK: query: -- test described in HIVE-4598 - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM ( SELECT * FROM src ) mapout REDUCE * USING 'cat' AS x,y ) reduced diff --git a/ql/src/test/results/clientpositive/ppd_vc.q.out b/ql/src/test/results/clientpositive/ppd_vc.q.out index 21181ac..acd4027 100644 --- a/ql/src/test/results/clientpositive/ppd_vc.q.out +++ b/ql/src/test/results/clientpositive/ppd_vc.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: --HIVE-3926 PPD on virtual column of partitioned table is not working - -explain extended +PREHOOK: query: explain extended select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100 PREHOOK: type: QUERY -POSTHOOK: query: --HIVE-3926 PPD on virtual column of partitioned table is not working - -explain extended +POSTHOOK: query: explain extended select * from srcpart where BLOCK__OFFSET__INSIDE__FILE<100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/ppd_windowing1.q.out b/ql/src/test/results/clientpositive/ppd_windowing1.q.out index 5f73138..68b1c0e 100644 --- a/ql/src/test/results/clientpositive/ppd_windowing1.q.out +++ b/ql/src/test/results/clientpositive/ppd_windowing1.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- Test simple PPD through Windowing -EXPLAIN select * from (SELECT key, sum(key) over(partition by key) as c1 from src)r1 where key > '2' +PREHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key) as c1 from src)r1 where key > '2' PREHOOK: type: QUERY -POSTHOOK: query: -- Test simple PPD through Windowing -EXPLAIN select * from (SELECT key, sum(key) over(partition by key) as c1 from src)r1 where key > '2' +POSTHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key) as c1 from src)r1 where key > '2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 
is a root stage @@ -330,11 +328,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys -EXPLAIN select * from (SELECT key, sum(key) over(partition by key, value) as c1 from src)r1 where key > '2' +PREHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key, value) as c1 from src)r1 where key > '2' PREHOOK: type: QUERY -POSTHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys -EXPLAIN select * from (SELECT key, sum(key) over(partition by key, value) as c1 from src)r1 where key > '2' +POSTHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key, value) as c1 from src)r1 where key > '2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -662,11 +658,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys, multiple windows are involved and UDAF is same -EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(key) over(partition by key) as c2 from src)r1 where key > '2' +PREHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(key) over(partition by key) as c2 from src)r1 where key > '2' PREHOOK: type: QUERY -POSTHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys, multiple windows are involved and UDAF is same -EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(key) over(partition by key) as c2 from src)r1 where key > '2' +POSTHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(key) over(partition by key) as c2 from src)r1 where key > '2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1116,11 +1110,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Test PPD through Windowing where predicate is a 
subset of partition keys, multiple windows are involved and UDAF has different args -EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(value) over(partition by key) as c2 from src)r1 where key > '2' +PREHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(value) over(partition by key) as c2 from src)r1 where key > '2' PREHOOK: type: QUERY -POSTHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys, multiple windows are involved and UDAF has different args -EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(value) over(partition by key) as c2 from src)r1 where key > '2' +POSTHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(value) over(partition by key) as c2 from src)r1 where key > '2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1570,11 +1562,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Test predicate is not getting pushed down when multiple windows are involved and they don't have common partition keys -EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, avg(value) over(partition by value) as c2 from src)r1 where key > '2' +PREHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, avg(value) over(partition by value) as c2 from src)r1 where key > '2' PREHOOK: type: QUERY -POSTHOOK: query: -- Test predicate is not getting pushed down when multiple windows are involved and they don't have common partition keys -EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, avg(value) over(partition by value) as c2 from src)r1 where key > '2' +POSTHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key,value) as c1, avg(value) over(partition by value) as c2 from src)r1 where key > '2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: 
Stage-1 is a root stage @@ -1685,11 +1675,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Test predicate is not getting pushed down when window has compound partition key -EXPLAIN select * from (SELECT key, sum(key) over(partition by key + 2) as c1 from src)r1 where key > '2' +PREHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key + 2) as c1 from src)r1 where key > '2' PREHOOK: type: QUERY -POSTHOOK: query: -- Test predicate is not getting pushed down when window has compound partition key -EXPLAIN select * from (SELECT key, sum(key) over(partition by key + 2) as c1 from src)r1 where key > '2' +POSTHOOK: query: EXPLAIN select * from (SELECT key, sum(key) over(partition by key + 2) as c1 from src)r1 where key > '2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1821,11 +1809,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Test predicate is not getting pushed down when predicate involves more than one col -EXPLAIN select * from (SELECT key, value, sum(key) over(partition by key, value) as c1 from src)r1 where (key + value) > '2' +PREHOOK: query: EXPLAIN select * from (SELECT key, value, sum(key) over(partition by key, value) as c1 from src)r1 where (key + value) > '2' PREHOOK: type: QUERY -POSTHOOK: query: -- Test predicate is not getting pushed down when predicate involves more than one col -EXPLAIN select * from (SELECT key, value, sum(key) over(partition by key, value) as c1 from src)r1 where (key + value) > '2' +POSTHOOK: query: EXPLAIN select * from (SELECT key, value, sum(key) over(partition by key, value) as c1 from src)r1 where (key + value) > '2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/ppd_windowing2.q.out b/ql/src/test/results/clientpositive/ppd_windowing2.q.out index 0e2081b..0792e8f 100644 --- a/ql/src/test/results/clientpositive/ppd_windowing2.q.out +++ 
b/ql/src/test/results/clientpositive/ppd_windowing2.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- NOTE: This is a correctness test. If you regen q.out, regen it with optimization turned off - --- Test simple PPD through Windowing -select * from (SELECT key, sum(key) over(partition by key) as c1 from src)r1 where key > '2' +PREHOOK: query: select * from (SELECT key, sum(key) over(partition by key) as c1 from src)r1 where key > '2' PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- NOTE: This is a correctness test. If you regen q.out, regen it with optimization turned off - --- Test simple PPD through Windowing -select * from (SELECT key, sum(key) over(partition by key) as c1 from src)r1 where key > '2' +POSTHOOK: query: select * from (SELECT key, sum(key) over(partition by key) as c1 from src)r1 where key > '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -2414,13 +2408,11 @@ POSTHOOK: Input: default@src 99 194.0 100 196.0 100 196.0 -PREHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys -select * from (SELECT key, sum(key) over(partition by key, value) as c1 from src)r1 where key > '2' +PREHOOK: query: select * from (SELECT key, sum(key) over(partition by key, value) as c1 from src)r1 where key > '2' PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys -select * from (SELECT key, sum(key) over(partition by key, value) as c1 from src)r1 where key > '2' +POSTHOOK: query: select * from (SELECT key, sum(key) over(partition by key, value) as c1 from src)r1 where key > '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -4826,13 +4818,11 @@ POSTHOOK: Input: default@src 99 194.0 100 196.0 100 196.0 -PREHOOK: query: -- Test PPD through Windowing where predicate is a subset of 
partition keys, multiple windows are involved and UDAF is same -select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(key) over(partition by key) as c2 from src)r1 where key > '2' +PREHOOK: query: select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(key) over(partition by key) as c2 from src)r1 where key > '2' PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys, multiple windows are involved and UDAF is same -select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(key) over(partition by key) as c2 from src)r1 where key > '2' +POSTHOOK: query: select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(key) over(partition by key) as c2 from src)r1 where key > '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -6734,13 +6724,11 @@ POSTHOOK: Input: default@src 99 194.0 194.0 100 196.0 196.0 100 196.0 196.0 -PREHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys, multiple windows are involved and UDAF has different args -select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(value) over(partition by key) as c2 from src)r1 where key > '2' +PREHOOK: query: select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(value) over(partition by key) as c2 from src)r1 where key > '2' PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test PPD through Windowing where predicate is a subset of partition keys, multiple windows are involved and UDAF has different args -select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(value) over(partition by key) as c2 from src)r1 where key > '2' +POSTHOOK: query: select * from (SELECT key, sum(key) over(partition by key,value) as c1, sum(value) 
over(partition by key) as c2 from src)r1 where key > '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -8642,13 +8630,11 @@ POSTHOOK: Input: default@src 99 194.0 0.0 100 196.0 0.0 100 196.0 0.0 -PREHOOK: query: -- Test predicate is not getting pushed down when multiple windows are involved and they don't have common partition keys -select * from (SELECT key, sum(key) over(partition by key,value) as c1, avg(value) over(partition by value) as c2 from src)r1 where key > '2' +PREHOOK: query: select * from (SELECT key, sum(key) over(partition by key,value) as c1, avg(value) over(partition by value) as c2 from src)r1 where key > '2' PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test predicate is not getting pushed down when multiple windows are involved and they don't have common partition keys -select * from (SELECT key, sum(key) over(partition by key,value) as c1, avg(value) over(partition by value) as c2 from src)r1 where key > '2' +POSTHOOK: query: select * from (SELECT key, sum(key) over(partition by key,value) as c1, avg(value) over(partition by value) as c2 from src)r1 where key > '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -9033,13 +9019,11 @@ POSTHOOK: Input: default@src 97 194.0 NULL 98 196.0 NULL 98 196.0 NULL -PREHOOK: query: -- Test predicate is not getting pushed down when window has compound partition key -select * from (SELECT key, sum(key) over(partition by key + 2) as c1 from src)r1 where key > '2' +PREHOOK: query: select * from (SELECT key, sum(key) over(partition by key + 2) as c1 from src)r1 where key > '2' PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test predicate is not getting pushed down when window has compound partition key -select * from (SELECT key, sum(key) over(partition by key + 2) as c1 from src)r1 where key > '2' +POSTHOOK: 
query: select * from (SELECT key, sum(key) over(partition by key + 2) as c1 from src)r1 where key > '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -9813,13 +9797,11 @@ POSTHOOK: Input: default@src 311 130091.0 86 130091.0 238 130091.0 -PREHOOK: query: -- Test predicate is not getting pushed down when predicate involves more than one col -select * from (SELECT key, value, sum(key) over(partition by key, value) as c1 from src)r1 where (key + value) > '2' +PREHOOK: query: select * from (SELECT key, value, sum(key) over(partition by key, value) as c1 from src)r1 where (key + value) > '2' PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test predicate is not getting pushed down when predicate involves more than one col -select * from (SELECT key, value, sum(key) over(partition by key, value) as c1 from src)r1 where (key + value) > '2' +POSTHOOK: query: select * from (SELECT key, value, sum(key) over(partition by key, value) as c1 from src)r1 where (key + value) > '2' POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/ptf_decimal.q.out b/ql/src/test/results/clientpositive/ptf_decimal.q.out index 8494e97..36a5af9 100644 --- a/ql/src/test/results/clientpositive/ptf_decimal.q.out +++ b/ql/src/test/results/clientpositive/ptf_decimal.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. aggregate functions with decimal type - -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lead(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c1, lag(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c2, first_value(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c3, @@ -11,11 +7,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. 
aggregate functions with decimal type - -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lead(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c1, lag(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c2, first_value(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c3, @@ -50,9 +42,7 @@ Manufacturer#5 1464.48 NULL 1018.1 1789.69 1464.48 Manufacturer#5 1611.66 1788.73 1789.69 1789.69 1611.66 Manufacturer#5 1788.73 1018.1 1611.66 1789.69 1788.73 Manufacturer#5 1789.69 1611.66 NULL 1789.69 1789.69 -PREHOOK: query: -- 2. ranking functions with decimal type - -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, row_number() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c1, rank() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c2, dense_rank() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c3, @@ -63,9 +53,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 2. ranking functions with decimal type - -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, row_number() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c1, rank() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c2, dense_rank() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c3, @@ -102,17 +90,13 @@ Manufacturer#5 1464.48 2 2 2 0.25 0.4 2 Manufacturer#5 1611.66 3 3 3 0.5 0.6 3 Manufacturer#5 1788.73 4 4 4 0.75 0.8 4 Manufacturer#5 1789.69 5 5 5 1.0 1.0 5 -PREHOOK: query: -- 3. order by decimal - -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice) over (partition by p_mfgr ORDER BY p_retailprice desc) as c1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 3. 
order by decimal - -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice) over (partition by p_mfgr ORDER BY p_retailprice desc) as c1 from part POSTHOOK: type: QUERY @@ -144,17 +128,13 @@ Manufacturer#5 1464.48 1611.66 Manufacturer#5 1611.66 1788.73 Manufacturer#5 1788.73 1789.69 Manufacturer#5 1789.69 NULL -PREHOOK: query: -- 4. partition by decimal - -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice) over (partition by p_retailprice) as c1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 4. partition by decimal - -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice) over (partition by p_retailprice) as c1 from part POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/ptf_general_queries.q.out b/ql/src/test/results/clientpositive/ptf_general_queries.q.out index a3d6194..8ab2c22 100644 --- a/ql/src/test/results/clientpositive/ptf_general_queries.q.out +++ b/ql/src/test/results/clientpositive/ptf_general_queries.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. testNoPTFNoWindowing -select p_mfgr, p_name, p_size +PREHOOK: query: select p_mfgr, p_name, p_size from part distribute by p_mfgr sort by p_name PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. testNoPTFNoWindowing -select p_mfgr, p_name, p_size +POSTHOOK: query: select p_mfgr, p_name, p_size from part distribute by p_mfgr sort by p_name @@ -44,8 +38,7 @@ Manufacturer#5 almond antique medium spring khaki 6 Manufacturer#5 almond antique sky peru orange 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- 2. 
testUDAFsNoWindowingNoPTFNoGBY -select p_mfgr,p_name, p_retailprice, +PREHOOK: query: select p_mfgr,p_name, p_retailprice, sum(p_retailprice) over(partition by p_mfgr order by p_name) as s, min(p_retailprice) over(partition by p_mfgr order by p_name) as mi, max(p_retailprice) over(partition by p_mfgr order by p_name) as ma, @@ -54,8 +47,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 2. testUDAFsNoWindowingNoPTFNoGBY -select p_mfgr,p_name, p_retailprice, +POSTHOOK: query: select p_mfgr,p_name, p_retailprice, sum(p_retailprice) over(partition by p_mfgr order by p_name) as s, min(p_retailprice) over(partition by p_mfgr order by p_name) as mi, max(p_retailprice) over(partition by p_mfgr order by p_name) as ma, @@ -90,13 +82,11 @@ Manufacturer#5 almond antique medium spring khaki 1611.66 3401.3500000000004 161 Manufacturer#5 almond antique sky peru orange 1788.73 5190.08 1611.66 1789.69 1730.0266666666666 Manufacturer#5 almond aquamarine dodger light gainsboro 1018.1 6208.18 1018.1 1789.69 1552.045 Manufacturer#5 almond azure blanched chiffon midnight 1464.48 7672.66 1018.1 1789.69 1534.532 -PREHOOK: query: -- 3. testConstExprInSelect -select 'tst1' as key, count(1) as value from part +PREHOOK: query: select 'tst1' as key, count(1) as value from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 3. 
testConstExprInSelect -select 'tst1' as key, count(1) as value from part +POSTHOOK: query: select 'tst1' as key, count(1) as value from part POSTHOOK: type: QUERY POSTHOOK: Input: default@part #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/ptf_matchpath.q.out b/ql/src/test/results/clientpositive/ptf_matchpath.q.out index 6dad660..b7409f9 100644 --- a/ql/src/test/results/clientpositive/ptf_matchpath.q.out +++ b/ql/src/test/results/clientpositive/ptf_matchpath.q.out @@ -34,10 +34,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVER POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@flights_tiny -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. basic Matchpath test -explain +PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on flights_tiny @@ -48,10 +45,7 @@ from matchpath(on arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. basic Matchpath test -explain +POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on flights_tiny @@ -158,8 +152,7 @@ Chicago 897 2010 10 20 4 20 Chicago 897 2010 10 21 3 21 Chicago 897 2010 10 22 2 22 Washington 7291 2010 10 27 2 27 -PREHOOK: query: -- 2. Matchpath on 1 partition -explain +PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on flights_tiny @@ -170,8 +163,7 @@ from matchpath(on ) where fl_num = 1142 PREHOOK: type: QUERY -POSTHOOK: query: -- 2. 
Matchpath on 1 partition -explain +POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on flights_tiny @@ -270,8 +262,7 @@ Baltimore 1142 2010 10 21 5 21 Baltimore 1142 2010 10 22 4 22 Baltimore 1142 2010 10 25 3 25 Baltimore 1142 2010 10 26 2 26 -PREHOOK: query: -- 3. empty partition. -explain +PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on (select * from flights_tiny where fl_num = -1142) flights_tiny @@ -281,8 +272,7 @@ from matchpath(on arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) PREHOOK: type: QUERY -POSTHOOK: query: -- 3. empty partition. -explain +POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on (select * from flights_tiny where fl_num = -1142) flights_tiny diff --git a/ql/src/test/results/clientpositive/ptf_rcfile.q.out b/ql/src/test/results/clientpositive/ptf_rcfile.q.out index ba83797..a6721fe 100644 --- a/ql/src/test/results/clientpositive/ptf_rcfile.q.out +++ b/ql/src/test/results/clientpositive/ptf_rcfile.q.out @@ -38,10 +38,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part.rc' overwrite int POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@part_rc -PREHOOK: query: -- SORT_QUERY_RESULTS - --- testWindowingPTFWithPartRC -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 @@ -51,10 +48,7 @@ order by p_name) PREHOOK: type: QUERY PREHOOK: Input: default@part_rc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- testWindowingPTFWithPartRC -select p_mfgr, p_name, 
p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 diff --git a/ql/src/test/results/clientpositive/ptf_register_tblfn.q.out b/ql/src/test/results/clientpositive/ptf_register_tblfn.q.out index 0edff7a..ad7c526 100644 --- a/ql/src/test/results/clientpositive/ptf_register_tblfn.q.out +++ b/ql/src/test/results/clientpositive/ptf_register_tblfn.q.out @@ -40,10 +40,7 @@ PREHOOK: Output: matchpathtest POSTHOOK: query: create temporary function matchpathtest as 'org.apache.hadoop.hive.ql.udf.ptf.MatchPath$MatchPathResolver' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: matchpathtest -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. basic Matchpath test -select origin_city_name, fl_num, year, month, day_of_month, sz, tpath +PREHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpathtest(on flights_tiny distribute by fl_num @@ -55,10 +52,7 @@ from matchpathtest(on PREHOOK: type: QUERY PREHOOK: Input: default@flights_tiny #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. 
basic Matchpath test -select origin_city_name, fl_num, year, month, day_of_month, sz, tpath +POSTHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpathtest(on flights_tiny distribute by fl_num diff --git a/ql/src/test/results/clientpositive/ptf_seqfile.q.out b/ql/src/test/results/clientpositive/ptf_seqfile.q.out index aa270e5..044638f 100644 --- a/ql/src/test/results/clientpositive/ptf_seqfile.q.out +++ b/ql/src/test/results/clientpositive/ptf_seqfile.q.out @@ -38,10 +38,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part.seq' overwrite in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@part_seq -PREHOOK: query: -- SORT_QUERY_RESULTS - --- testWindowingPTFWithPartSeqFile -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 @@ -51,10 +48,7 @@ order by p_name) PREHOOK: type: QUERY PREHOOK: Input: default@part_seq #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- testWindowingPTFWithPartSeqFile -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 diff --git a/ql/src/test/results/clientpositive/query_with_semi.q.out b/ql/src/test/results/clientpositive/query_with_semi.q.out index 001cf7d..93da006 100644 --- a/ql/src/test/results/clientpositive/query_with_semi.q.out +++ b/ql/src/test/results/clientpositive/query_with_semi.q.out @@ -1,10 +1,10 @@ PREHOOK: query: from src -select transform('aa;') using 'cat' as a limit 1 +select 
transform('aa\;') using 'cat' as a limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### POSTHOOK: query: from src -select transform('aa;') using 'cat' as a limit 1 +select transform('aa\;') using 'cat' as a limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/quotedid_alter.q.out b/ql/src/test/results/clientpositive/quotedid_alter.q.out index 8d2ef57..219621d 100644 --- a/ql/src/test/results/clientpositive/quotedid_alter.q.out +++ b/ql/src/test/results/clientpositive/quotedid_alter.q.out @@ -16,13 +16,11 @@ clustered by (`!@#$%^&*()_q`) sorted by (`!@#$%^&*()_q`) into 2 buckets POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@src_b3 POSTHOOK: Output: default@src_b3 -PREHOOK: query: -- alter partition -create table src_p3(`x+1` string, `y&y` string) partitioned by (`!@#$%^&*()_q` string) +PREHOOK: query: create table src_p3(`x+1` string, `y&y` string) partitioned by (`!@#$%^&*()_q` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_p3 -POSTHOOK: query: -- alter partition -create table src_p3(`x+1` string, `y&y` string) partitioned by (`!@#$%^&*()_q` string) +POSTHOOK: query: create table src_p3(`x+1` string, `y&y` string) partitioned by (`!@#$%^&*()_q` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_p3 diff --git a/ql/src/test/results/clientpositive/quotedid_basic.q.out b/ql/src/test/results/clientpositive/quotedid_basic.q.out index 4f79a46..382c4ad 100644 --- a/ql/src/test/results/clientpositive/quotedid_basic.q.out +++ b/ql/src/test/results/clientpositive/quotedid_basic.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- basic -create table t1(`x+1` string, `y&y` string, `!@#$%^&*()_q` string) +PREHOOK: query: create table t1(`x+1` string, `y&y` string, `!@#$%^&*()_q` string) PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- basic -create table t1(`x+1` string, `y&y` string, `!@#$%^&*()_q` string) +POSTHOOK: query: create table t1(`x+1` string, `y&y` string, `!@#$%^&*()_q` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -240,12 +238,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- case insensitive -explain select `X+1`, `Y&y`, `!@#$%^&*()_Q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) +PREHOOK: query: explain select `X+1`, `Y&y`, `!@#$%^&*()_Q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, `!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- case insensitive -explain select `X+1`, `Y&y`, `!@#$%^&*()_Q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) +POSTHOOK: query: explain select `X+1`, `Y&y`, `!@#$%^&*()_Q`, rank() over(partition by `!@#$%^&*()_q` order by `y&y`) from t1 where `!@#$%^&*()_q` = '1' group by `x+1`, `y&Y`, `!@#$%^&*()_q` having `!@#$%^&*()_Q` = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -344,13 +340,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- escaped back ticks -create table t4(`x+1``` string, `y&y` string) +PREHOOK: query: create table t4(`x+1``` string, `y&y` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t4 -POSTHOOK: query: -- escaped back ticks -create table t4(`x+1``` string, `y&y` string) +POSTHOOK: query: create table t4(`x+1``` string, `y&y` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t4 @@ -383,16 +377,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t4 #### A masked pattern was here #### 10 val_10 1 -PREHOOK: query: -- view -create view v1 as +PREHOOK: query: create view v1 as select `x+1```, `y&y` from t4 where `x+1``` < '200' PREHOOK: type: CREATEVIEW PREHOOK: Input: 
default@t4 PREHOOK: Output: database:default PREHOOK: Output: default@v1 -POSTHOOK: query: -- view -create view v1 as +POSTHOOK: query: create view v1 as select `x+1```, `y&y` from t4 where `x+1``` < '200' POSTHOOK: type: CREATEVIEW diff --git a/ql/src/test/results/clientpositive/quotedid_skew.q.out b/ql/src/test/results/clientpositive/quotedid_skew.q.out index a18c823..cd73c18 100644 --- a/ql/src/test/results/clientpositive/quotedid_skew.q.out +++ b/ql/src/test/results/clientpositive/quotedid_skew.q.out @@ -34,16 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple join query with skew on both the tables on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a. `!@#$%^&*()_q` = b. `!@#$%^&*()_q` PREHOOK: type: QUERY -POSTHOOK: query: -- a simple join query with skew on both the tables on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a. `!@#$%^&*()_q` = b. 
`!@#$%^&*()_q` POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out index f3fd8f8..210eedf 100644 --- a/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out +++ b/ql/src/test/results/clientpositive/rand_partitionpruner1.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- scanning un-partitioned data -explain extended select * from src where rand(1) < 0.1 +PREHOOK: query: explain extended select * from src where rand(1) < 0.1 PREHOOK: type: QUERY -POSTHOOK: query: -- scanning un-partitioned data -explain extended select * from src where rand(1) < 0.1 +POSTHOOK: query: explain extended select * from src where rand(1) < 0.1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out index df42672..62f9eef 100644 --- a/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out +++ b/ql/src/test/results/clientpositive/rand_partitionpruner2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- scanning partitioned data - -create table tmptable(key string, value string, hr string, ds string) +PREHOOK: query: create table tmptable(key string, value string, hr string, ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- scanning partitioned data - -create table tmptable(key string, value string, hr string, ds string) +POSTHOOK: query: create table tmptable(key string, value string, hr string, ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out index 85de3f0..16b2ea4 100644 --- a/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out 
+++ b/ql/src/test/results/clientpositive/rand_partitionpruner3.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- complex predicates in the where clause - -explain extended select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' +PREHOOK: query: explain extended select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' PREHOOK: type: QUERY -POSTHOOK: query: -- complex predicates in the where clause - -explain extended select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' +POSTHOOK: query: explain extended select a.* from srcpart a where rand(1) < 0.1 and a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -88,11 +84,9 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 26 val_26 2008-04-08 12 18 val_18 2008-04-08 12 37 val_37 2008-04-08 12 -PREHOOK: query: -- without rand for comparison -explain extended select a.* from srcpart a where a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' +PREHOOK: query: explain extended select a.* from srcpart a where a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' PREHOOK: type: QUERY -POSTHOOK: query: -- without rand for comparison -explain extended select a.* from srcpart a where a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' +POSTHOOK: query: explain extended select a.* from srcpart a where a.ds = '2008-04-08' and not(key > 50 or key < 10) and a.hr like '%2' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/recursive_dir.q.out b/ql/src/test/results/clientpositive/recursive_dir.q.out index 8789a2d..070f55b 100644 --- a/ql/src/test/results/clientpositive/recursive_dir.q.out +++ b/ql/src/test/results/clientpositive/recursive_dir.q.out @@ -1,12 
+1,8 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - -CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@fact_daily -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - -CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE fact_daily(x int) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@fact_daily diff --git a/ql/src/test/results/clientpositive/reduce_deduplicate_extended2.q.out b/ql/src/test/results/clientpositive/reduce_deduplicate_extended2.q.out index df2a5d2..8e35d1b 100644 --- a/ql/src/test/results/clientpositive/reduce_deduplicate_extended2.q.out +++ b/ql/src/test/results/clientpositive/reduce_deduplicate_extended2.q.out @@ -1,12 +1,10 @@ -PREHOOK: query: -- JOIN + GBY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT f.key, g.value FROM src f JOIN src g ON (f.key = g.key AND f.value = g.value) GROUP BY g.value, f.key PREHOOK: type: QUERY -POSTHOOK: query: -- JOIN + GBY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT f.key, g.value FROM src f JOIN src g ON (f.key = g.key AND f.value = g.value) @@ -101,16 +99,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- JOIN + GBY + OBY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT g.key, f.value FROM src f JOIN src g ON (f.key = g.key AND f.value = g.value) GROUP BY g.key, f.value ORDER BY f.value, g.key PREHOOK: type: QUERY -POSTHOOK: query: -- JOIN + GBY + OBY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT g.key, f.value FROM src f JOIN src g ON (f.key = g.key AND f.value = g.value) @@ -210,8 +206,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- GBY + JOIN + GBY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT f.key, g.value FROM src f JOIN ( @@ -221,8 +216,7 @@ JOIN ( ON (f.key = g.key AND f.value = g.value) GROUP BY g.value, f.key 
PREHOOK: type: QUERY -POSTHOOK: query: -- GBY + JOIN + GBY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT f.key, g.value FROM src f JOIN ( @@ -345,8 +339,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 2GBY + JOIN + GBY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT f.key, g.value FROM ( SELECT key, value @@ -359,8 +352,7 @@ JOIN ( ON (f.key = g.key AND f.value = g.value) GROUP BY g.value, f.key PREHOOK: type: QUERY -POSTHOOK: query: -- 2GBY + JOIN + GBY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT f.key, g.value FROM ( SELECT key, value @@ -511,8 +503,7 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[14][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- 2GBY + JOIN + GBY + OBY -EXPLAIN +PREHOOK: query: EXPLAIN SELECT f.key, g.value FROM ( SELECT value @@ -525,8 +516,7 @@ JOIN ( GROUP BY g.value, f.key ORDER BY f.key desc, g.value PREHOOK: type: QUERY -POSTHOOK: query: -- 2GBY + JOIN + GBY + OBY -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT f.key, g.value FROM ( SELECT value @@ -676,8 +666,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 2(2GBY + JOIN + GBY + OBY) + UNION -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.key, x.value FROM ( SELECT f.key, g.value @@ -706,8 +695,7 @@ UNION ALL ) x ORDER BY x.value desc, x.key desc PREHOOK: type: QUERY -POSTHOOK: query: -- 2(2GBY + JOIN + GBY + OBY) + UNION -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.key, x.value FROM ( SELECT f.key, g.value diff --git a/ql/src/test/results/clientpositive/remove_exprs_stats.q.out b/ql/src/test/results/clientpositive/remove_exprs_stats.q.out index ae2aa37..8fe688d 100644 --- a/ql/src/test/results/clientpositive/remove_exprs_stats.q.out +++ b/ql/src/test/results/clientpositive/remove_exprs_stats.q.out @@ -60,11 +60,9 @@ POSTHOOK: query: analyze table loc_orc compute statistics for columns state,loci POSTHOOK: type: QUERY POSTHOOK: Input: default@loc_orc #### A masked pattern was here #### -PREHOOK: query: -- always true -explain select 
* from loc_orc where locid < 30 +PREHOOK: query: explain select * from loc_orc where locid < 30 PREHOOK: type: QUERY -POSTHOOK: query: -- always true -explain select * from loc_orc where locid < 30 +POSTHOOK: query: explain select * from loc_orc where locid < 30 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -83,11 +81,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- always false -explain select * from loc_orc where locid > 30 +PREHOOK: query: explain select * from loc_orc where locid > 30 PREHOOK: type: QUERY -POSTHOOK: query: -- always false -explain select * from loc_orc where locid > 30 +POSTHOOK: query: explain select * from loc_orc where locid > 30 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -121,11 +117,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- always true -explain select * from loc_orc where locid <= 30 +PREHOOK: query: explain select * from loc_orc where locid <= 30 PREHOOK: type: QUERY -POSTHOOK: query: -- always true -explain select * from loc_orc where locid <= 30 +POSTHOOK: query: explain select * from loc_orc where locid <= 30 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -144,11 +138,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- always false -explain select * from loc_orc where locid >= 30 +PREHOOK: query: explain select * from loc_orc where locid >= 30 PREHOOK: type: QUERY -POSTHOOK: query: -- always false -explain select * from loc_orc where locid >= 30 +POSTHOOK: query: explain select * from loc_orc where locid >= 30 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -182,11 +174,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- nothing to do -explain select * from loc_orc where locid < 6 +PREHOOK: query: explain select * from loc_orc where locid < 6 PREHOOK: 
type: QUERY -POSTHOOK: query: -- nothing to do -explain select * from loc_orc where locid < 6 +POSTHOOK: query: explain select * from loc_orc where locid < 6 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -220,11 +210,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- always false -explain select * from loc_orc where locid > 6 +PREHOOK: query: explain select * from loc_orc where locid > 6 PREHOOK: type: QUERY -POSTHOOK: query: -- always false -explain select * from loc_orc where locid > 6 +POSTHOOK: query: explain select * from loc_orc where locid > 6 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -258,11 +246,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- always true -explain select * from loc_orc where locid <= 6 +PREHOOK: query: explain select * from loc_orc where locid <= 6 PREHOOK: type: QUERY -POSTHOOK: query: -- always true -explain select * from loc_orc where locid <= 6 +POSTHOOK: query: explain select * from loc_orc where locid <= 6 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -281,11 +267,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- nothing to do -explain select * from loc_orc where locid >= 6 +PREHOOK: query: explain select * from loc_orc where locid >= 6 PREHOOK: type: QUERY -POSTHOOK: query: -- nothing to do -explain select * from loc_orc where locid >= 6 +POSTHOOK: query: explain select * from loc_orc where locid >= 6 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -319,11 +303,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- always false -explain select * from loc_orc where locid < 1 +PREHOOK: query: explain select * from loc_orc where locid < 1 PREHOOK: type: QUERY -POSTHOOK: query: -- always false -explain select * from loc_orc where locid < 1 +POSTHOOK: query: explain select * from loc_orc where locid < 1 POSTHOOK: type: QUERY STAGE 
DEPENDENCIES: Stage-1 is a root stage @@ -357,11 +339,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- nothing to do -explain select * from loc_orc where locid > 1 +PREHOOK: query: explain select * from loc_orc where locid > 1 PREHOOK: type: QUERY -POSTHOOK: query: -- nothing to do -explain select * from loc_orc where locid > 1 +POSTHOOK: query: explain select * from loc_orc where locid > 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -395,11 +375,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- nothing to do -explain select * from loc_orc where locid <= 1 +PREHOOK: query: explain select * from loc_orc where locid <= 1 PREHOOK: type: QUERY -POSTHOOK: query: -- nothing to do -explain select * from loc_orc where locid <= 1 +POSTHOOK: query: explain select * from loc_orc where locid <= 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -433,11 +411,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- always true -explain select * from loc_orc where locid >= 1 +PREHOOK: query: explain select * from loc_orc where locid >= 1 PREHOOK: type: QUERY -POSTHOOK: query: -- always true -explain select * from loc_orc where locid >= 1 +POSTHOOK: query: explain select * from loc_orc where locid >= 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -456,11 +432,9 @@ STAGE PLANS: Statistics: Num rows: 8 Data size: 816 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- 5 should stay -explain select * from loc_orc where locid IN (-4,5,30,40) +PREHOOK: query: explain select * from loc_orc where locid IN (-4,5,30,40) PREHOOK: type: QUERY -POSTHOOK: query: -- 5 should stay -explain select * from loc_orc where locid IN (-4,5,30,40) +POSTHOOK: query: explain select * from loc_orc where locid IN (-4,5,30,40) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -494,11 +468,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- nothing to do 
-explain select * from loc_orc where locid IN (5,2,3) +PREHOOK: query: explain select * from loc_orc where locid IN (5,2,3) PREHOOK: type: QUERY -POSTHOOK: query: -- nothing to do -explain select * from loc_orc where locid IN (5,2,3) +POSTHOOK: query: explain select * from loc_orc where locid IN (5,2,3) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -532,11 +504,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 1 and 6 should be left -explain select * from loc_orc where locid IN (1,6,9) +PREHOOK: query: explain select * from loc_orc where locid IN (1,6,9) PREHOOK: type: QUERY -POSTHOOK: query: -- 1 and 6 should be left -explain select * from loc_orc where locid IN (1,6,9) +POSTHOOK: query: explain select * from loc_orc where locid IN (1,6,9) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -570,11 +540,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- always false -explain select * from loc_orc where locid IN (40,30) +PREHOOK: query: explain select * from loc_orc where locid IN (40,30) PREHOOK: type: QUERY -POSTHOOK: query: -- always false -explain select * from loc_orc where locid IN (40,30) +POSTHOOK: query: explain select * from loc_orc where locid IN (40,30) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/rename_column.q.out b/ql/src/test/results/clientpositive/rename_column.q.out index 10a4df2..6ea13f8 100644 --- a/ql/src/test/results/clientpositive/rename_column.q.out +++ b/ql/src/test/results/clientpositive/rename_column.q.out @@ -163,12 +163,10 @@ src_thrift srcbucket srcbucket2 srcpart -PREHOOK: query: -- Using non-default Database -CREATE DATABASE kv_rename_test_db +PREHOOK: query: CREATE DATABASE kv_rename_test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:kv_rename_test_db -POSTHOOK: query: -- Using non-default Database -CREATE DATABASE kv_rename_test_db +POSTHOOK: query: CREATE DATABASE kv_rename_test_db 
POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:kv_rename_test_db PREHOOK: query: USE kv_rename_test_db diff --git a/ql/src/test/results/clientpositive/rename_partition_location.q.out b/ql/src/test/results/clientpositive/rename_partition_location.q.out index 52a5fd0..bfdd580 100644 --- a/ql/src/test/results/clientpositive/rename_partition_location.q.out +++ b/ql/src/test/results/clientpositive/rename_partition_location.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- This test verifies that if the tables location changes, renaming a partition will not change --- the partition location accordingly - -CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@rename_partition_table -POSTHOOK: query: -- This test verifies that if the tables location changes, renaming a partition will not change --- the partition location accordingly - -CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/rename_table_location.q.out b/ql/src/test/results/clientpositive/rename_table_location.q.out index 7f481af..f491ae6 100644 --- a/ql/src/test/results/clientpositive/rename_table_location.q.out +++ b/ql/src/test/results/clientpositive/rename_table_location.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- This test verifies that if the tables location changes, renaming a table will not change --- the table location scheme - -CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY 
(part STRING) +PREHOOK: query: CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@rename_partition_table -POSTHOOK: query: -- This test verifies that if the tables location changes, renaming a table will not change --- the table location scheme - -CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE rename_partition_table (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE #### A masked pattern was here #### POSTHOOK: type: CREATETABLE @@ -37,10 +31,7 @@ POSTHOOK: type: ALTERTABLE_LOCATION POSTHOOK: Input: default@rename_partition_table POSTHOOK: Output: default@rename_partition_table #### A masked pattern was here #### -PREHOOK: query: -- If the metastore attempts to change the scheme of the table back to the default pfile, it will get --- an exception related to the source and destination file systems not matching - -ALTER TABLE rename_partition_table RENAME TO rename_partition_table_renamed +PREHOOK: query: ALTER TABLE rename_partition_table RENAME TO rename_partition_table_renamed PREHOOK: type: ALTERTABLE_RENAME PREHOOK: Input: default@rename_partition_table PREHOOK: Output: default@rename_partition_table diff --git a/ql/src/test/results/clientpositive/repl_1_drop.q.out b/ql/src/test/results/clientpositive/repl_1_drop.q.out index 9fb65d1..5924e9f 100644 --- a/ql/src/test/results/clientpositive/repl_1_drop.q.out +++ b/ql/src/test/results/clientpositive/repl_1_drop.q.out @@ -57,14 +57,10 @@ POSTHOOK: query: drop table repl_employee for replication('33') POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@repl_employee POSTHOOK: Output: default@repl_employee -PREHOOK: query: -- drop 33 => table does not get dropped, but ca will be - -show partitions repl_employee 
+PREHOOK: query: show partitions repl_employee PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@repl_employee -POSTHOOK: query: -- drop 33 => table does not get dropped, but ca will be - -show partitions repl_employee +POSTHOOK: query: show partitions repl_employee POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@repl_employee PREHOOK: query: show table extended like repl_employee @@ -105,14 +101,10 @@ POSTHOOK: query: drop table repl_employee for replication('') POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@repl_employee POSTHOOK: Output: default@repl_employee -PREHOOK: query: -- drop '' => ptns would be dropped, but not tables - -show partitions repl_employee +PREHOOK: query: show partitions repl_employee PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@repl_employee -POSTHOOK: query: -- drop '' => ptns would be dropped, but not tables - -show partitions repl_employee +POSTHOOK: query: show partitions repl_employee POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@repl_employee PREHOOK: query: show table extended like repl_employee @@ -135,34 +127,26 @@ POSTHOOK: query: drop table repl_employee for replication('49') POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@repl_employee POSTHOOK: Output: default@repl_employee -PREHOOK: query: -- table and ptns should have been dropped, so next create can succeed - -create table repl_employee ( emp_id int comment "employee id") +PREHOOK: query: create table repl_employee ( emp_id int comment "employee id") comment "employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@repl_employee -POSTHOOK: query: -- table and ptns should have been dropped, so next create can succeed - -create table repl_employee ( emp_id int comment "employee id") +POSTHOOK: query: create table repl_employee ( emp_id int comment "employee id") comment 
"employee table" partitioned by (emp_country string comment "two char iso code", emp_state string comment "free text") stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@repl_employee -PREHOOK: query: -- created table without a repl.last.id - -load data local inpath "../../data/files/test.dat" +PREHOOK: query: load data local inpath "../../data/files/test.dat" into table repl_employee partition (emp_country="us", emp_state="ca") PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@repl_employee -POSTHOOK: query: -- created table without a repl.last.id - -load data local inpath "../../data/files/test.dat" +POSTHOOK: query: load data local inpath "../../data/files/test.dat" into table repl_employee partition (emp_country="us", emp_state="ca") POSTHOOK: type: LOAD #### A masked pattern was here #### @@ -232,14 +216,10 @@ POSTHOOK: query: alter table repl_employee drop partition (emp_country="us", emp POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@repl_employee POSTHOOK: Output: default@repl_employee@emp_country=us/emp_state=wa -PREHOOK: query: -- should have dropped ca, wa - -show partitions repl_employee +PREHOOK: query: show partitions repl_employee PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@repl_employee -POSTHOOK: query: -- should have dropped ca, wa - -show partitions repl_employee +POSTHOOK: query: show partitions repl_employee POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@repl_employee emp_country=us/emp_state=ak @@ -276,14 +256,10 @@ POSTHOOK: query: alter table repl_employee drop partition (emp_country="us", emp POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@repl_employee POSTHOOK: Output: default@repl_employee@emp_country=us/emp_state=ak -PREHOOK: query: -- should have dropped ak - -show partitions repl_employee +PREHOOK: query: show partitions repl_employee PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@repl_employee 
-POSTHOOK: query: -- should have dropped ak - -show partitions repl_employee +POSTHOOK: query: show partitions repl_employee POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@repl_employee PREHOOK: query: show table extended like repl_employee @@ -306,15 +282,11 @@ POSTHOOK: query: drop table repl_employee POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@repl_employee POSTHOOK: Output: default@repl_employee -PREHOOK: query: -- should drop the whole table, and this can be verified by trying to create another table with the same name - -create table repl_employee( a string) +PREHOOK: query: create table repl_employee( a string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@repl_employee -POSTHOOK: query: -- should drop the whole table, and this can be verified by trying to create another table with the same name - -create table repl_employee( a string) +POSTHOOK: query: create table repl_employee( a string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@repl_employee diff --git a/ql/src/test/results/clientpositive/repl_2_exim_basic.q.out b/ql/src/test/results/clientpositive/repl_2_exim_basic.q.out index b2cbea9..8c8e6be 100644 --- a/ql/src/test/results/clientpositive/repl_2_exim_basic.q.out +++ b/ql/src/test/results/clientpositive/repl_2_exim_basic.q.out @@ -71,25 +71,11 @@ POSTHOOK: type: LOAD POSTHOOK: Output: default@ext_t POSTHOOK: Output: default@ext_t@emp_country=us/emp_state=ca #### A masked pattern was here #### -PREHOOK: query: -- verifying difference between normal export of a external table --- and a replication export of an ext table --- the replication export will have squashed the "EXTERNAL" flag --- this is because the destination of all replication exports are --- managed tables. 
The managed tables should be similar except --- for the repl.last.id values - -export table managed_t to 'ql/test/data/exports/managed_t' +PREHOOK: query: export table managed_t to 'ql/test/data/exports/managed_t' PREHOOK: type: EXPORT PREHOOK: Input: default@managed_t@emp_country=us/emp_state=ca #### A masked pattern was here #### -POSTHOOK: query: -- verifying difference between normal export of a external table --- and a replication export of an ext table --- the replication export will have squashed the "EXTERNAL" flag --- this is because the destination of all replication exports are --- managed tables. The managed tables should be similar except --- for the repl.last.id values - -export table managed_t to 'ql/test/data/exports/managed_t' +POSTHOOK: query: export table managed_t to 'ql/test/data/exports/managed_t' POSTHOOK: type: EXPORT POSTHOOK: Input: default@managed_t@emp_country=us/emp_state=ca #### A masked pattern was here #### @@ -214,13 +200,11 @@ POSTHOOK: Input: default@managed_t_imported@emp_country=us/emp_state=ca 4 us ca 5 us ca 6 us ca -PREHOOK: query: -- should have repl.last.id -import table managed_t_r_imported from 'ql/test/data/exports/managed_t_r' +PREHOOK: query: import table managed_t_r_imported from 'ql/test/data/exports/managed_t_r' PREHOOK: type: IMPORT #### A masked pattern was here #### PREHOOK: Output: default@managed_t_r_imported -POSTHOOK: query: -- should have repl.last.id -import table managed_t_r_imported from 'ql/test/data/exports/managed_t_r' +POSTHOOK: query: import table managed_t_r_imported from 'ql/test/data/exports/managed_t_r' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: default@managed_t_r_imported @@ -378,15 +362,11 @@ POSTHOOK: Input: default@ext_t_imported@emp_country=us/emp_state=ca 4 us ca 5 us ca 6 us ca -PREHOOK: query: -- should have repl.last.id --- also - importing an external table replication export would turn the new table into a managed table -import table ext_t_r_imported 
from 'ql/test/data/exports/ext_t_r' +PREHOOK: query: import table ext_t_r_imported from 'ql/test/data/exports/ext_t_r' PREHOOK: type: IMPORT #### A masked pattern was here #### PREHOOK: Output: default@ext_t_r_imported -POSTHOOK: query: -- should have repl.last.id --- also - importing an external table replication export would turn the new table into a managed table -import table ext_t_r_imported from 'ql/test/data/exports/ext_t_r' +POSTHOOK: query: import table ext_t_r_imported from 'ql/test/data/exports/ext_t_r' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: default@ext_t_r_imported diff --git a/ql/src/test/results/clientpositive/repl_3_exim_metadata.q.out b/ql/src/test/results/clientpositive/repl_3_exim_metadata.q.out index 8387c02..b07451d 100644 --- a/ql/src/test/results/clientpositive/repl_3_exim_metadata.q.out +++ b/ql/src/test/results/clientpositive/repl_3_exim_metadata.q.out @@ -135,13 +135,11 @@ POSTHOOK: Input: default@repldst@emp_country=us/emp_state=ca 4 us ca 5 us ca 6 us ca -PREHOOK: query: -- should be similar, except that select will return no results -import table repldst_md from 'ql/test/data/exports/repldst_md' +PREHOOK: query: import table repldst_md from 'ql/test/data/exports/repldst_md' PREHOOK: type: IMPORT #### A masked pattern was here #### PREHOOK: Output: default@repldst_md -POSTHOOK: query: -- should be similar, except that select will return no results -import table repldst_md from 'ql/test/data/exports/repldst_md' +POSTHOOK: query: import table repldst_md from 'ql/test/data/exports/repldst_md' POSTHOOK: type: IMPORT #### A masked pattern was here #### POSTHOOK: Output: default@repldst_md diff --git a/ql/src/test/results/clientpositive/router_join_ppr.q.out b/ql/src/test/results/clientpositive/router_join_ppr.q.out index bf46fe9..65733e7 100644 --- a/ql/src/test/results/clientpositive/router_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/router_join_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 
SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM src a RIGHT OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN EXTENDED SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM src a RIGHT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/runtime_skewjoin_mapjoin_spark.q.out b/ql/src/test/results/clientpositive/runtime_skewjoin_mapjoin_spark.q.out index 824d641..efcf681 100644 --- a/ql/src/test/results/clientpositive/runtime_skewjoin_mapjoin_spark.q.out +++ b/ql/src/test/results/clientpositive/runtime_skewjoin_mapjoin_spark.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- This is mainly intended for spark, to test runtime skew join together with map join - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- This is mainly intended for spark, to test runtime skew join together with map join - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/sample1.q.out b/ql/src/test/results/clientpositive/sample1.q.out index f57519b..79f6693 100644 --- a/ql/src/test/results/clientpositive/sample1.q.out +++ b/ql/src/test/results/clientpositive/sample1.q.out @@ -6,14 +6,12 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- no input pruning, no sample filter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT 
s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s WHERE s.ds='2008-04-08' and s.hr='11' PREHOOK: type: QUERY -POSTHOOK: query: -- no input pruning, no sample filter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s WHERE s.ds='2008-04-08' and s.hr='11' diff --git a/ql/src/test/results/clientpositive/sample2.q.out b/ql/src/test/results/clientpositive/sample2.q.out index 06690df..66874b1 100644 --- a/ql/src/test/results/clientpositive/sample2.q.out +++ b/ql/src/test/results/clientpositive/sample2.q.out @@ -6,15 +6,11 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- input pruning, no sample filter --- default table sample columns -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s PREHOOK: type: QUERY -POSTHOOK: query: -- input pruning, no sample filter --- default table sample columns -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/sample3.q.out b/ql/src/test/results/clientpositive/sample3.q.out index e49f13c..bfa2c74 100644 --- a/ql/src/test/results/clientpositive/sample3.q.out +++ b/ql/src/test/results/clientpositive/sample3.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- no input pruning, sample filter -EXPLAIN +PREHOOK: query: EXPLAIN SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- no input pruning, sample filter -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s POSTHOOK: type: QUERY diff --git 
a/ql/src/test/results/clientpositive/sample4.q.out b/ql/src/test/results/clientpositive/sample4.q.out index 4652dd8..3459127 100644 --- a/ql/src/test/results/clientpositive/sample4.q.out +++ b/ql/src/test/results/clientpositive/sample4.q.out @@ -6,15 +6,11 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- bucket column is the same as table sample --- No need for sample filter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s PREHOOK: type: QUERY -POSTHOOK: query: -- bucket column is the same as table sample --- No need for sample filter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/sample6.q.out b/ql/src/test/results/clientpositive/sample6.q.out index 475fe02..ee6847d 100644 --- a/ql/src/test/results/clientpositive/sample6.q.out +++ b/ql/src/test/results/clientpositive/sample6.q.out @@ -6,13 +6,11 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- both input pruning and sample filter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s PREHOOK: type: QUERY -POSTHOOK: query: -- both input pruning and sample filter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/sample7.q.out b/ql/src/test/results/clientpositive/sample7.q.out index 1d0f0e8..c4bfcb8 100644 
--- a/ql/src/test/results/clientpositive/sample7.q.out +++ b/ql/src/test/results/clientpositive/sample7.q.out @@ -6,14 +6,12 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- both input pruning and sample filter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100 PREHOOK: type: QUERY -POSTHOOK: query: -- both input pruning and sample filter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100 diff --git a/ql/src/test/results/clientpositive/sample8.q.out b/ql/src/test/results/clientpositive/sample8.q.out index b316331..01cf738 100644 --- a/ql/src/test/results/clientpositive/sample8.q.out +++ b/ql/src/test/results/clientpositive/sample8.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- sampling with join and alias --- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON key) s JOIN srcpart TABLESAMPLE (BUCKET 1 OUT OF 10 ON key) t WHERE t.key = s.key and t.value = s.value and s.ds='2008-04-08' and s.hr='11' PREHOOK: type: QUERY -POSTHOOK: query: -- sampling with join and alias --- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON key) s JOIN srcpart TABLESAMPLE (BUCKET 1 OUT OF 10 ON key) t diff --git a/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out b/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out index 094cc8d..438b96b 100644 --- a/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out +++ b/ql/src/test/results/clientpositive/sample_islocalmode_hook.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- 
EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - --- create file inputs -create table sih_i_part (key int, value string) partitioned by (p string) +PREHOOK: query: create table sih_i_part (key int, value string) partitioned by (p string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@sih_i_part -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - --- create file inputs -create table sih_i_part (key int, value string) partitioned by (p string) +POSTHOOK: query: create table sih_i_part (key int, value string) partitioned by (p string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@sih_i_part @@ -72,27 +66,18 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@sih_src2 POSTHOOK: Lineage: sih_src2.key SIMPLE [(sih_src)sih_src.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: sih_src2.value SIMPLE [(sih_src)sih_src.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- Relaxing hive.exec.mode.local.auto.input.files.max=1. --- Hadoop20 will not generate more splits than there are files (one). --- Hadoop23 generate splits correctly (four), hence the max needs to be adjusted to ensure running in local mode. --- Default value is hive.exec.mode.local.auto.input.files.max=4 which produces expected behavior on Hadoop23. --- hive.sample.seednumber is required because Hadoop23 generates multiple splits and tablesample is non-repeatable without it. 
- --- sample split, running locally limited by num tasks -select count(1) from sih_src tablesample(1 percent) +PREHOOK: query: select count(1) from sih_src tablesample(1 percent) PREHOOK: type: QUERY PREHOOK: Input: default@sih_src #### A masked pattern was here #### 25 -PREHOOK: query: -- sample two tables -select count(1) from sih_src tablesample(1 percent) a join sih_src2 tablesample(1 percent) b on a.key = b.key +PREHOOK: query: select count(1) from sih_src tablesample(1 percent) a join sih_src2 tablesample(1 percent) b on a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@sih_src PREHOOK: Input: default@sih_src2 #### A masked pattern was here #### 49 -PREHOOK: query: -- sample split, running locally limited by max bytes -select count(1) from sih_src tablesample(1 percent) +PREHOOK: query: select count(1) from sih_src tablesample(1 percent) PREHOOK: type: QUERY PREHOOK: Input: default@sih_src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/sample_islocalmode_hook_use_metadata.q.out b/ql/src/test/results/clientpositive/sample_islocalmode_hook_use_metadata.q.out index d268837..318f380 100644 --- a/ql/src/test/results/clientpositive/sample_islocalmode_hook_use_metadata.q.out +++ b/ql/src/test/results/clientpositive/sample_islocalmode_hook_use_metadata.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - --- create file inputs -create table sih_i_part (key int, value string) partitioned by (p string) +PREHOOK: query: create table sih_i_part (key int, value string) partitioned by (p string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@sih_i_part -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - --- create file inputs -create table sih_i_part (key int, value string) partitioned by (p string) +POSTHOOK: query: create table sih_i_part (key int, value string) partitioned by (p string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
POSTHOOK: Output: default@sih_i_part @@ -72,15 +66,7 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@sih_src2 POSTHOOK: Lineage: sih_src2.key SIMPLE [(sih_src)sih_src.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: sih_src2.value SIMPLE [(sih_src)sih_src.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- Relaxing hive.exec.mode.local.auto.input.files.max=1. --- Hadoop20 will not generate more splits than there are files (one). --- Hadoop23 generate splits correctly (four), hence the max needs to be adjusted to ensure running in local mode. --- Default value is hive.exec.mode.local.auto.input.files.max=4 which produces expected behavior on Hadoop23. --- hive.sample.seednumber is required because Hadoop23 generates multiple splits and tablesample is non-repeatable without it. - --- sample split, running locally limited by num tasks - -desc formatted sih_src +PREHOOK: query: desc formatted sih_src PREHOOK: type: DESCTABLE PREHOOK: Input: default@sih_src # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/script_env_var1.q.out b/ql/src/test/results/clientpositive/script_env_var1.q.out index cd39eb8..c1181b2 100644 --- a/ql/src/test/results/clientpositive/script_env_var1.q.out +++ b/ql/src/test/results/clientpositive/script_env_var1.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- Verifies that script operator ID environment variables have unique values --- in each instance of the script operator. 
-SELECT count(1) FROM +PREHOOK: query: SELECT count(1) FROM ( SELECT * FROM (SELECT TRANSFORM('echo $HIVE_SCRIPT_OPERATOR_ID') USING 'sh' AS key FROM src order by key LIMIT 1)x UNION ALL SELECT * FROM (SELECT TRANSFORM('echo $HIVE_SCRIPT_OPERATOR_ID') USING 'sh' AS key FROM src order by key LIMIT 1)y ) a GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Verifies that script operator ID environment variables have unique values --- in each instance of the script operator. -SELECT count(1) FROM +POSTHOOK: query: SELECT count(1) FROM ( SELECT * FROM (SELECT TRANSFORM('echo $HIVE_SCRIPT_OPERATOR_ID') USING 'sh' AS key FROM src order by key LIMIT 1)x UNION ALL SELECT * FROM (SELECT TRANSFORM('echo $HIVE_SCRIPT_OPERATOR_ID') USING 'sh' AS key FROM src order by key LIMIT 1)y ) a GROUP BY key POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/script_env_var2.q.out b/ql/src/test/results/clientpositive/script_env_var2.q.out index c3bb990..58a0936 100644 --- a/ql/src/test/results/clientpositive/script_env_var2.q.out +++ b/ql/src/test/results/clientpositive/script_env_var2.q.out @@ -1,12 +1,10 @@ -PREHOOK: query: -- Same test as script_env_var1, but test setting the variable name -SELECT count(1) FROM +PREHOOK: query: SELECT count(1) FROM ( SELECT * FROM (SELECT TRANSFORM('echo $MY_ID') USING 'sh' AS key FROM src LIMIT 1)a UNION ALL SELECT * FROM (SELECT TRANSFORM('echo $MY_ID') USING 'sh' AS key FROM src LIMIT 1)b ) a GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Same test as script_env_var1, but test setting the variable name -SELECT count(1) FROM +POSTHOOK: query: SELECT count(1) FROM ( SELECT * FROM (SELECT TRANSFORM('echo $MY_ID') USING 'sh' AS key FROM src LIMIT 1)a UNION ALL SELECT * FROM (SELECT TRANSFORM('echo $MY_ID') USING 'sh' AS key FROM src LIMIT 1)b ) a GROUP BY key POSTHOOK: type: QUERY diff --git 
a/ql/src/test/results/clientpositive/script_pipe.q.out b/ql/src/test/results/clientpositive/script_pipe.q.out index 0cdf2d0..2a59fe2 100644 --- a/ql/src/test/results/clientpositive/script_pipe.q.out +++ b/ql/src/test/results/clientpositive/script_pipe.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- Tests exception in ScriptOperator.close() by passing to the operator a small amount of data -EXPLAIN SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp +PREHOOK: query: EXPLAIN SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Tests exception in ScriptOperator.close() by passing to the operator a small amount of data -EXPLAIN SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp +POSTHOOK: query: EXPLAIN SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -56,11 +54,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Tests exception in ScriptOperator.processOp() by passing extra data needed to fill pipe buffer -EXPLAIN SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value) USING 'head -n 1' as a,b,c,d FROM src +PREHOOK: query: EXPLAIN SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value) USING 'head -n 1' as a,b,c,d FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- Tests exception in ScriptOperator.processOp() by passing extra data needed to fill pipe buffer -EXPLAIN SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value) USING 'head -n 1' as a,b,c,d FROM src +POSTHOOK: query: EXPLAIN SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value) USING 'head -n 1' as a,b,c,d FROM src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git 
a/ql/src/test/results/clientpositive/select_dummy_source.q.out b/ql/src/test/results/clientpositive/select_dummy_source.q.out index a23c8ad..b6fa03f 100644 --- a/ql/src/test/results/clientpositive/select_dummy_source.q.out +++ b/ql/src/test/results/clientpositive/select_dummy_source.q.out @@ -31,12 +31,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### a 100 -PREHOOK: query: --evaluation -explain +PREHOOK: query: explain select 1 + 1 PREHOOK: type: QUERY -POSTHOOK: query: --evaluation -explain +POSTHOOK: query: explain select 1 + 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -66,12 +64,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 2 -PREHOOK: query: -- explode (not possible for lateral view) -explain +PREHOOK: query: explain select explode(array('a', 'b')) PREHOOK: type: QUERY -POSTHOOK: query: -- explode (not possible for lateral view) -explain +POSTHOOK: query: explain select explode(array('a', 'b')) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -220,12 +216,10 @@ POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### a b -PREHOOK: query: -- subquery -explain +PREHOOK: query: explain select 2 + 3,x from (select 1 + 2 x) X PREHOOK: type: QUERY -POSTHOOK: query: -- subquery -explain +POSTHOOK: query: explain select 2 + 3,x from (select 1 + 2 x) X POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/select_same_col.q.out b/ql/src/test/results/clientpositive/select_same_col.q.out index 66bf5c2..b03b3de 100644 --- a/ql/src/test/results/clientpositive/select_same_col.q.out +++ b/ql/src/test/results/clientpositive/select_same_col.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - -drop table srclimit +PREHOOK: query: drop table srclimit PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_BEFORE_DIFF - -drop table srclimit +POSTHOOK: query: drop table srclimit POSTHOOK: type: 
DROPTABLE PREHOOK: query: create table srclimit as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT diff --git a/ql/src/test/results/clientpositive/semicolon.q.out b/ql/src/test/results/clientpositive/semicolon.q.out index 5889833..580f0f3 100644 --- a/ql/src/test/results/clientpositive/semicolon.q.out +++ b/ql/src/test/results/clientpositive/semicolon.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- comment --- comment; --- comment -SELECT COUNT(1) FROM src +PREHOOK: query: SELECT COUNT(1) FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- comment --- comment; --- comment -SELECT COUNT(1) FROM src +POSTHOOK: query: SELECT COUNT(1) FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/serde_user_properties.q.out b/ql/src/test/results/clientpositive/serde_user_properties.q.out index c671d33..afab64c 100644 --- a/ql/src/test/results/clientpositive/serde_user_properties.q.out +++ b/ql/src/test/results/clientpositive/serde_user_properties.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- HIVE-2906 Table properties in SQL - -explain extended select key from src +PREHOOK: query: explain extended select key from src PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-2906 Table properties in SQL - -explain extended select key from src +POSTHOOK: query: explain extended select key from src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/show_columns.q.out b/ql/src/test/results/clientpositive/show_columns.q.out index c653671..4a67fb3 100644 --- a/ql/src/test/results/clientpositive/show_columns.q.out +++ b/ql/src/test/results/clientpositive/show_columns.q.out @@ -36,12 +36,10 @@ POSTHOOK: Input: default@shcol_test key value ds -PREHOOK: query: -- SHOW COLUMNS -CREATE DATABASE test_db +PREHOOK: query: CREATE DATABASE test_db PREHOOK: type: CREATEDATABASE PREHOOK: 
Output: database:test_db -POSTHOOK: query: -- SHOW COLUMNS -CREATE DATABASE test_db +POSTHOOK: query: CREATE DATABASE test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: USE test_db @@ -58,12 +56,10 @@ POSTHOOK: query: CREATE TABLE foo(a INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:test_db POSTHOOK: Output: test_db@foo -PREHOOK: query: -- SHOW COLUMNS basic syntax tests -USE test_db +PREHOOK: query: USE test_db PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:test_db -POSTHOOK: query: -- SHOW COLUMNS basic syntax tests -USE test_db +POSTHOOK: query: USE test_db POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:test_db PREHOOK: query: SHOW COLUMNS from foo @@ -80,12 +76,10 @@ POSTHOOK: query: SHOW COLUMNS in foo POSTHOOK: type: SHOWCOLUMNS POSTHOOK: Input: test_db@foo a -PREHOOK: query: -- SHOW COLUMNS from a database with a name that requires escaping -CREATE DATABASE `database` +PREHOOK: query: CREATE DATABASE `database` PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:database -POSTHOOK: query: -- SHOW COLUMNS from a database with a name that requires escaping -CREATE DATABASE `database` +POSTHOOK: query: CREATE DATABASE `database` POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:database PREHOOK: query: USE `database` diff --git a/ql/src/test/results/clientpositive/show_create_table_alter.q.out b/ql/src/test/results/clientpositive/show_create_table_alter.q.out index d1c1a53..6023f47 100644 --- a/ql/src/test/results/clientpositive/show_create_table_alter.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_alter.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- Test SHOW CREATE TABLE on an external, clustered and sorted table. Then test the query again after ALTERs. 
- -CREATE EXTERNAL TABLE tmp_showcrt1 (key smallint, value float) +PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key smallint, value float) CLUSTERED BY (key) SORTED BY (value DESC) INTO 5 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- Test SHOW CREATE TABLE on an external, clustered and sorted table. Then test the query again after ALTERs. - -CREATE EXTERNAL TABLE tmp_showcrt1 (key smallint, value float) +POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key smallint, value float) CLUSTERED BY (key) SORTED BY (value DESC) INTO 5 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -36,13 +32,11 @@ LOCATION #### A masked pattern was here #### TBLPROPERTIES ( #### A masked pattern was here #### -PREHOOK: query: -- Add a comment to the table, change the EXTERNAL property, and test SHOW CREATE TABLE on the change. -ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE') +PREHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@tmp_showcrt1 PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- Add a comment to the table, change the EXTERNAL property, and test SHOW CREATE TABLE on the change. -ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE') +POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='temporary table', 'EXTERNAL'='FALSE') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: default@tmp_showcrt1 POSTHOOK: Output: default@tmp_showcrt1 @@ -72,13 +66,11 @@ LOCATION TBLPROPERTIES ( 'EXTERNAL'='FALSE', #### A masked pattern was here #### -PREHOOK: query: -- Alter the table comment, change the EXTERNAL property back and test SHOW CREATE TABLE on the change. 
-ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE') +PREHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@tmp_showcrt1 PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- Alter the table comment, change the EXTERNAL property back and test SHOW CREATE TABLE on the change. -ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE') +POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('comment'='changed comment', 'EXTERNAL'='TRUE') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: default@tmp_showcrt1 POSTHOOK: Output: default@tmp_showcrt1 @@ -107,13 +99,11 @@ LOCATION #### A masked pattern was here #### TBLPROPERTIES ( #### A masked pattern was here #### -PREHOOK: query: -- Change the 'SORTBUCKETCOLSPREFIX' property and test SHOW CREATE TABLE. The output should not change. -ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE') +PREHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@tmp_showcrt1 PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- Change the 'SORTBUCKETCOLSPREFIX' property and test SHOW CREATE TABLE. The output should not change. -ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE') +POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('SORTBUCKETCOLSPREFIX'='FALSE') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: default@tmp_showcrt1 POSTHOOK: Output: default@tmp_showcrt1 @@ -142,13 +132,11 @@ LOCATION #### A masked pattern was here #### TBLPROPERTIES ( #### A masked pattern was here #### -PREHOOK: query: -- Alter the storage handler of the table, and test SHOW CREATE TABLE. 
-ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler') +PREHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: default@tmp_showcrt1 PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- Alter the storage handler of the table, and test SHOW CREATE TABLE. -ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler') +POSTHOOK: query: ALTER TABLE tmp_showcrt1 SET TBLPROPERTIES ('storage_handler'='org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: default@tmp_showcrt1 POSTHOOK: Output: default@tmp_showcrt1 diff --git a/ql/src/test/results/clientpositive/show_create_table_db_table.q.out b/ql/src/test/results/clientpositive/show_create_table_db_table.q.out index 495f4b5..c9a6bfb 100644 --- a/ql/src/test/results/clientpositive/show_create_table_db_table.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_db_table.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- Test SHOW CREATE TABLE on a table name of format "db.table". - -CREATE DATABASE tmp_feng comment 'for show create table test' +PREHOOK: query: CREATE DATABASE tmp_feng comment 'for show create table test' PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:tmp_feng -POSTHOOK: query: -- Test SHOW CREATE TABLE on a table name of format "db.table". 
- -CREATE DATABASE tmp_feng comment 'for show create table test' +POSTHOOK: query: CREATE DATABASE tmp_feng comment 'for show create table test' POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:tmp_feng PREHOOK: query: SHOW DATABASES diff --git a/ql/src/test/results/clientpositive/show_create_table_delimited.q.out b/ql/src/test/results/clientpositive/show_create_table_delimited.q.out index e14f850..00ed838 100644 --- a/ql/src/test/results/clientpositive/show_create_table_delimited.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_delimited.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- Test SHOW CREATE TABLE on a table with delimiters, stored format, and location. - -CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +PREHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY '|' MAP KEYS TERMINATED BY '\045' LINES TERMINATED BY '\n' STORED AS textfile #### A masked pattern was here #### @@ -8,9 +6,7 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- Test SHOW CREATE TABLE on a table with delimiters, stored format, and location. 
- -CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +POSTHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' COLLECTION ITEMS TERMINATED BY '|' MAP KEYS TERMINATED BY '\045' LINES TERMINATED BY '\n' STORED AS textfile #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/show_create_table_partitioned.q.out b/ql/src/test/results/clientpositive/show_create_table_partitioned.q.out index 100fde6..3e850aa 100644 --- a/ql/src/test/results/clientpositive/show_create_table_partitioned.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_partitioned.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- Test SHOW CREATE TABLE on a table with partitions and column comments. - -CREATE EXTERNAL TABLE tmp_showcrt1 (key string, newvalue boolean COMMENT 'a new value') +PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key string, newvalue boolean COMMENT 'a new value') COMMENT 'temporary table' PARTITIONED BY (value bigint COMMENT 'some value') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- Test SHOW CREATE TABLE on a table with partitions and column comments. - -CREATE EXTERNAL TABLE tmp_showcrt1 (key string, newvalue boolean COMMENT 'a new value') +POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key string, newvalue boolean COMMENT 'a new value') COMMENT 'temporary table' PARTITIONED BY (value bigint COMMENT 'some value') POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/show_create_table_serde.q.out b/ql/src/test/results/clientpositive/show_create_table_serde.q.out index 44414b2..d98bf00 100644 --- a/ql/src/test/results/clientpositive/show_create_table_serde.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_serde.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Test SHOW CREATE TABLE on a table with serde. 
- -CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +PREHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- Test SHOW CREATE TABLE on a table with serde. - -CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +POSTHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp_showcrt1 @@ -49,8 +45,7 @@ POSTHOOK: query: DROP TABLE tmp_showcrt1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@tmp_showcrt1 POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: -- without a storage handler -CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +PREHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) COMMENT 'temporary table' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' @@ -58,8 +53,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- without a storage handler -CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +POSTHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) COMMENT 'temporary table' ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' @@ -96,8 +90,7 @@ POSTHOOK: query: DROP TABLE tmp_showcrt1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@tmp_showcrt1 POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: -- without a storage handler / with custom serde params -CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +PREHOOK: query: CREATE TABLE 
tmp_showcrt1 (key int, value string, newvalue bigint) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' WITH SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2') STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' @@ -105,8 +98,7 @@ OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileOutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- without a storage handler / with custom serde params -CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) +POSTHOOK: query: CREATE TABLE tmp_showcrt1 (key int, value string, newvalue bigint) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' WITH SERDEPROPERTIES ('custom.property.key1'='custom.property.value1', 'custom.property.key2'='custom.property.value2') STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.RCFileInputFormat' @@ -145,16 +137,14 @@ POSTHOOK: query: DROP TABLE tmp_showcrt1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@tmp_showcrt1 POSTHOOK: Output: default@tmp_showcrt1 -PREHOOK: query: -- with a storage handler and serde properties -CREATE EXTERNAL TABLE tmp_showcrt1 (key string, value boolean) +PREHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key string, value boolean) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED BY 'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler' WITH SERDEPROPERTIES ('field.delim'=',', 'serialization.format'='$') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp_showcrt1 -POSTHOOK: query: -- with a storage handler and serde properties -CREATE EXTERNAL TABLE tmp_showcrt1 (key string, value boolean) +POSTHOOK: query: CREATE EXTERNAL TABLE tmp_showcrt1 (key string, value boolean) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED BY 
'org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler' WITH SERDEPROPERTIES ('field.delim'=',', 'serialization.format'='$') diff --git a/ql/src/test/results/clientpositive/show_create_table_view.q.out b/ql/src/test/results/clientpositive/show_create_table_view.q.out index 94f33cf..a27209d 100644 --- a/ql/src/test/results/clientpositive/show_create_table_view.q.out +++ b/ql/src/test/results/clientpositive/show_create_table_view.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- Test SHOW CREATE TABLE on a view name. - -CREATE VIEW tmp_copy_src AS SELECT * FROM src +PREHOOK: query: CREATE VIEW tmp_copy_src AS SELECT * FROM src PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@tmp_copy_src -POSTHOOK: query: -- Test SHOW CREATE TABLE on a view name. - -CREATE VIEW tmp_copy_src AS SELECT * FROM src +POSTHOOK: query: CREATE VIEW tmp_copy_src AS SELECT * FROM src POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/show_partitions.q.out b/ql/src/test/results/clientpositive/show_partitions.q.out index b439273..8b7473a 100644 --- a/ql/src/test/results/clientpositive/show_partitions.q.out +++ b/ql/src/test/results/clientpositive/show_partitions.q.out @@ -115,12 +115,10 @@ POSTHOOK: query: ALTER TABLE srcpart ADD PARTITION (ds='4', hr='5') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: db1@srcpart POSTHOOK: Output: db1@srcpart@ds=4/hr=5 -PREHOOK: query: -- from db1 to default db -SHOW PARTITIONS default.srcpart PARTITION(hr='11') +PREHOOK: query: SHOW PARTITIONS default.srcpart PARTITION(hr='11') PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- from db1 to default db -SHOW PARTITIONS default.srcpart PARTITION(hr='11') +POSTHOOK: query: SHOW PARTITIONS default.srcpart PARTITION(hr='11') POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 @@ -132,12 +130,10 @@ 
POSTHOOK: query: SHOW PARTITIONS default.srcpart PARTITION(ds='2008-04-08', hr=' POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=12 -PREHOOK: query: -- from db1 to db1 -SHOW PARTITIONS srcpart PARTITION(ds='4') +PREHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='4') PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: db1@srcpart -POSTHOOK: query: -- from db1 to db1 -SHOW PARTITIONS srcpart PARTITION(ds='4') +POSTHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='4') POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: db1@srcpart ds=4/hr=4 @@ -155,12 +151,10 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- from default to db1 -SHOW PARTITIONS db1.srcpart PARTITION(ds='4') +PREHOOK: query: SHOW PARTITIONS db1.srcpart PARTITION(ds='4') PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: db1@srcpart -POSTHOOK: query: -- from default to db1 -SHOW PARTITIONS db1.srcpart PARTITION(ds='4') +POSTHOOK: query: SHOW PARTITIONS db1.srcpart PARTITION(ds='4') POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: db1@srcpart ds=4/hr=4 diff --git a/ql/src/test/results/clientpositive/show_tables.q.out b/ql/src/test/results/clientpositive/show_tables.q.out index 432c2ab..4c46c09 100644 --- a/ql/src/test/results/clientpositive/show_tables.q.out +++ b/ql/src/test/results/clientpositive/show_tables.q.out @@ -76,12 +76,10 @@ POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:default shtb_test1 shtb_test2 -PREHOOK: query: -- SHOW TABLES FROM/IN database -CREATE DATABASE test_db +PREHOOK: query: CREATE DATABASE test_db PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:test_db -POSTHOOK: query: -- SHOW TABLES FROM/IN database -CREATE DATABASE test_db +POSTHOOK: query: CREATE DATABASE test_db POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:test_db PREHOOK: query: USE test_db @@ -114,12 +112,10 @@ POSTHOOK: query: CREATE TABLE baz(a INT) POSTHOOK: type: 
CREATETABLE POSTHOOK: Output: database:test_db POSTHOOK: Output: test_db@baz -PREHOOK: query: -- SHOW TABLES basic syntax tests -USE default +PREHOOK: query: USE default PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:default -POSTHOOK: query: -- SHOW TABLES basic syntax tests -USE default +POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default PREHOOK: query: SHOW TABLES FROM test_db @@ -198,11 +194,9 @@ PREHOOK: Input: database:test_db POSTHOOK: query: SHOW TABLES IN test_db LIKE "nomatch" POSTHOOK: type: SHOWTABLES POSTHOOK: Input: database:test_db -PREHOOK: query: -- SHOW TABLE EXTENDED basic syntax tests and wildcard -SHOW TABLE EXTENDED IN test_db LIKE foo +PREHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE foo PREHOOK: type: SHOW_TABLESTATUS -POSTHOOK: query: -- SHOW TABLE EXTENDED basic syntax tests and wildcard -SHOW TABLE EXTENDED IN test_db LIKE foo +POSTHOOK: query: SHOW TABLE EXTENDED IN test_db LIKE foo POSTHOOK: type: SHOW_TABLESTATUS tableName:foo #### A masked pattern was here #### @@ -358,12 +352,10 @@ maxFileSize:0 minFileSize:0 #### A masked pattern was here #### -PREHOOK: query: -- SHOW TABLES from a database with a name that requires escaping -CREATE DATABASE `database` +PREHOOK: query: CREATE DATABASE `database` PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:database -POSTHOOK: query: -- SHOW TABLES from a database with a name that requires escaping -CREATE DATABASE `database` +POSTHOOK: query: CREATE DATABASE `database` POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:database PREHOOK: query: USE `database` diff --git a/ql/src/test/results/clientpositive/show_tblproperties.q.out b/ql/src/test/results/clientpositive/show_tblproperties.q.out index e1c6670..9377beb 100644 --- a/ql/src/test/results/clientpositive/show_tblproperties.q.out +++ b/ql/src/test/results/clientpositive/show_tblproperties.q.out @@ -102,11 +102,9 @@ POSTHOOK: query: alter table tmpfoo set tblproperties 
("tmp" = "true1") POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: db1@tmpfoo POSTHOOK: Output: db1@tmpfoo -PREHOOK: query: -- from db1 to default db -show tblproperties default.tmpfoo +PREHOOK: query: show tblproperties default.tmpfoo PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: -- from db1 to default db -show tblproperties default.tmpfoo +POSTHOOK: query: show tblproperties default.tmpfoo POSTHOOK: type: SHOW_TBLPROPERTIES bar bar value #### A masked pattern was here #### @@ -121,11 +119,9 @@ PREHOOK: type: SHOW_TBLPROPERTIES POSTHOOK: query: show tblproperties default.tmpfoo("bar") POSTHOOK: type: SHOW_TBLPROPERTIES bar value -PREHOOK: query: -- from db1 to db1 -show tblproperties tmpfoo +PREHOOK: query: show tblproperties tmpfoo PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: -- from db1 to db1 -show tblproperties tmpfoo +POSTHOOK: query: show tblproperties tmpfoo POSTHOOK: type: SHOW_TBLPROPERTIES bar bar value1 #### A masked pattern was here #### @@ -146,11 +142,9 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- from default to db1 -show tblproperties db1.tmpfoo +PREHOOK: query: show tblproperties db1.tmpfoo PREHOOK: type: SHOW_TBLPROPERTIES -POSTHOOK: query: -- from default to db1 -show tblproperties db1.tmpfoo +POSTHOOK: query: show tblproperties db1.tmpfoo POSTHOOK: type: SHOW_TBLPROPERTIES bar bar value1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/show_views.q.out b/ql/src/test/results/clientpositive/show_views.q.out index 61b5f1d..723b334 100644 --- a/ql/src/test/results/clientpositive/show_views.q.out +++ b/ql/src/test/results/clientpositive/show_views.q.out @@ -135,12 +135,10 @@ POSTHOOK: query: SHOW VIEWS 'shtb_*' POSTHOOK: type: SHOWVIEWS shtb_test1_view1 shtb_test2_view2 -PREHOOK: query: -- SHOW VIEWS basic syntax tests -USE default +PREHOOK: query: USE default PREHOOK: type: SWITCHDATABASE 
PREHOOK: Input: database:default -POSTHOOK: query: -- SHOW VIEWS basic syntax tests -USE default +POSTHOOK: query: USE default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default PREHOOK: query: SHOW VIEWS FROM test1 @@ -175,12 +173,10 @@ PREHOOK: query: SHOW VIEWS IN test2 LIKE "nomatch" PREHOOK: type: SHOWVIEWS POSTHOOK: query: SHOW VIEWS IN test2 LIKE "nomatch" POSTHOOK: type: SHOWVIEWS -PREHOOK: query: -- SHOW VIEWS from a database with a name that requires escaping -CREATE DATABASE `database` +PREHOOK: query: CREATE DATABASE `database` PREHOOK: type: CREATEDATABASE PREHOOK: Output: database:database -POSTHOOK: query: -- SHOW VIEWS from a database with a name that requires escaping -CREATE DATABASE `database` +POSTHOOK: query: CREATE DATABASE `database` POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:database PREHOOK: query: USE `database` diff --git a/ql/src/test/results/clientpositive/skewjoin.q.out b/ql/src/test/results/clientpositive/skewjoin.q.out index 5695732..e792381 100644 --- a/ql/src/test/results/clientpositive/skewjoin.q.out +++ b/ql/src/test/results/clientpositive/skewjoin.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out index 8c2d780..2cffbd8 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin1.q.out @@ -34,20 +34,10 
@@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- copy from skewjoinopt1 --- test compile time skew join and auto map join --- a simple join query with skew on both the tables on the join key --- adding an order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt1 --- test compile time skew join and auto map join --- a simple join query with skew on both the tables on the join key --- adding an order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -181,14 +171,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -324,14 +310,10 @@ NULL NULL 5 15 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out index 74408b8..4aeac77 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out +++ 
b/ql/src/test/results/clientpositive/skewjoin_mapjoin10.q.out @@ -14,13 +14,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmp POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tmpt1 -PREHOOK: query: -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +PREHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +POSTHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 @@ -68,26 +66,10 @@ POSTHOOK: Input: default@tmpt2 POSTHOOK: Output: default@t2 POSTHOOK: Lineage: t2.key EXPRESSION [(tmpt2)tmpt2.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t2.val SIMPLE [(tmpt2)tmpt2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- copy from skewjoinopt15 --- test compile time skew join and auto map join --- The skewed key is a integer column. --- Otherwise this test is similar to skewjoinopt1.q --- Both the joined tables are skewed, and the joined column --- is an integer --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt15 --- test compile time skew join and auto map join --- The skewed key is a integer column. 
--- Otherwise this test is similar to skewjoinopt1.q --- Both the joined tables are skewed, and the joined column --- is an integer --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -221,14 +203,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -364,14 +342,10 @@ NULL NULL 5 15 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out index 188f189..c9821e9 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin11.q.out @@ -34,24 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- copy from skewjoinopt19 --- test compile time skew join and auto map join --- add a test where the skewed key is also the bucketized key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results 
deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt19 --- test compile time skew join and auto map join --- add a test where the skewed key is also the bucketized key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out index ba5e811..5de1cf3 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin2.q.out @@ -34,24 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- copy from skewjoinopt3 --- test compile time skew join and auto map join --- a simple query with skew on both the tables. One of the skewed --- value is common to both the tables. The skewed value should not be --- repeated in the filter. --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt3 --- test compile time skew join and auto map join --- a simple query with skew on both the tables. One of the skewed --- value is common to both the tables. The skewed value should not be --- repeated in the filter. 
--- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -185,14 +171,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out index 61c42a2..1c543a0 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin3.q.out @@ -34,24 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- copy from skewjoinopt6 --- test compile time skew join and auto map join --- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. The join key is a subset of the skewed key set: --- it only contains the first skewed key for both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt6 --- test compile time skew join and auto map join --- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. 
The join key is a subset of the skewed key set: --- it only contains the first skewed key for both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out index eb3d918..c3809e9 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin4.q.out @@ -50,24 +50,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- copy from skewjoinopt7 --- test compile time skew join and auto map join --- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. --- 2 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt7 --- test compile time skew join and auto map join --- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. 
--- 2 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out index 83bc8c7..663eb12 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin5.q.out @@ -32,12 +32,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- copy from skewjoinopt9 --- test compile time skew join and auto map join --- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a union all --- adding a order by at the end to make the results deterministic -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select key, val from T1 @@ -46,12 +41,7 @@ select key, val from T1 ) subq1 join T2 b on subq1.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt9 --- test compile time skew join and auto map join --- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a union all --- adding a order by at the end to make the results deterministic -EXPLAIN +POSTHOOK: query: EXPLAIN select * from ( select key, val from T1 @@ -191,18 +181,14 @@ POSTHOOK: Input: default@t2 8 28 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a group by -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select key, count(1) as cnt from T1 group by key ) subq1 join T2 b on subq1.key = b.key PREHOOK: type: QUERY -POSTHOOK: 
query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a group by -EXPLAIN +POSTHOOK: query: EXPLAIN select * from ( select key, count(1) as cnt from T1 group by key diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out index f87d939..7ce87ec 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out @@ -36,20 +36,10 @@ POSTHOOK: Input: default@t1 POSTHOOK: Output: default@array_valued_t1 POSTHOOK: Lineage: array_valued_t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: array_valued_t1.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- copy from skewjoinopt10 --- test compile time skew join and auto map join --- This test is to verify the skew join compile optimization when the join is followed by a lateral view --- adding a order by at the end to make the results deterministic - -explain +PREHOOK: query: explain select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt10 --- test compile time skew join and auto map join --- This test is to verify the skew join compile optimization when the join is followed by a lateral view --- adding a order by at the end to make the results deterministic - -explain +POSTHOOK: query: explain select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out index 2bf8274..e48fbee 100644 --- 
a/ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin7.q.out @@ -32,14 +32,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- copy from skewjoinopt11 --- test compile time skew join and auto map join --- This test is to verify the skew join compile optimization when the join is followed --- by a union. Both sides of a union consist of a join, which should have used --- skew join compile time optimization. --- adding an order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key @@ -47,14 +40,7 @@ select * from select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt11 --- test compile time skew join and auto map join --- This test is to verify the skew join compile optimization when the join is followed --- by a union. Both sides of a union consist of a join, which should have used --- skew join compile time optimization. 
--- adding an order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from ( select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out index 79e7c6e..bd052a7 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin8.q.out @@ -48,31 +48,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- copy from skewjoinopt13 --- test compile time skew join and auto map join --- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Table 3 is skewed, but since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key join T3 c on a.val = c.val PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt13 --- test compile time skew join and auto map join --- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. 
Table 3 is skewed, but since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out b/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out index a994bf7..f38c087 100644 --- a/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_mapjoin9.q.out @@ -50,33 +50,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- copy from skewjoinopt14 --- test compile time skew join and auto map join --- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Tables 1 and 3 are skewed. Since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not enabled for table 3, but it is used for the first join between --- tables 1 and 2 --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key join T3 c on a.val = c.val PREHOOK: type: QUERY -POSTHOOK: query: -- copy from skewjoinopt14 --- test compile time skew join and auto map join --- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Tables 1 and 3 are skewed. 
Since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not enabled for table 3, but it is used for the first join between --- tables 1 and 2 --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out b/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out index 7d5e353..a9883c5 100644 --- a/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out @@ -1,25 +1,9 @@ -PREHOOK: query: -- This is to test the union->selectstar->filesink and skewjoin optimization --- Union of 2 map-reduce subqueries is performed for the skew join --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output, it might be easier to run the test --- only on hadoop 23 - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- This is to test the union->selectstar->filesink and skewjoin optimization --- Union of 2 map-reduce subqueries is performed for the skew join --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output, it might be easier to run the test --- only on hadoop 23 - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -50,14 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple join query with skew on both the tables on the join key - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- a simple join query with skew on both the tables on the join key - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -191,14 +171,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out b/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out index 3744f85..71c4690 100644 --- a/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out @@ -50,26 +50,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This is to test the union->selectstar->filesink and skewjoin optimization --- Union of 3 
map-reduce subqueries is performed for the skew join --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table, it might be easier --- to run the test only on hadoop 23 - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- This is to test the union->selectstar->filesink and skewjoin optimization --- Union of 3 map-reduce subqueries is performed for the skew join --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table, it might be easier --- to run the test only on hadoop 23 - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt1.q.out b/ql/src/test/results/clientpositive/skewjoinopt1.q.out index 8c265f4..789bfb1 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt1.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt1.q.out @@ -34,16 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple join query with skew on both the tables on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- a simple join query with skew on both the tables on the join key --- adding a order by at 
the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -200,14 +194,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -366,14 +356,10 @@ NULL NULL 5 15 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt10.q.out b/ql/src/test/results/clientpositive/skewjoinopt10.q.out index deb0fb1..3c4dd51 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt10.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt10.q.out @@ -36,16 +36,10 @@ POSTHOOK: Input: default@t1 POSTHOOK: Output: default@array_valued_t1 POSTHOOK: Lineage: array_valued_t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: array_valued_t1.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- This test is to verify the skew join compile optimization when the join is followed by a lateral view --- adding a order by at the end to make the results deterministic - -explain +PREHOOK: query: explain select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view 
explode (array_val) c as val PREHOOK: type: QUERY -POSTHOOK: query: -- This test is to verify the skew join compile optimization when the join is followed by a lateral view --- adding a order by at the end to make the results deterministic - -explain +POSTHOOK: query: explain select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt11.q.out b/ql/src/test/results/clientpositive/skewjoinopt11.q.out index 430e22c..1455878 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt11.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt11.q.out @@ -32,12 +32,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- This test is to verify the skew join compile optimization when the join is followed --- by a union. Both sides of a union consist of a join, which should have used --- skew join compile time optimization. --- adding an order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key @@ -45,12 +40,7 @@ select * from select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- This test is to verify the skew join compile optimization when the join is followed --- by a union. Both sides of a union consist of a join, which should have used --- skew join compile time optimization. 
--- adding an order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from ( select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/skewjoinopt12.q.out b/ql/src/test/results/clientpositive/skewjoinopt12.q.out index 355daa5..3a1899c 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt12.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt12.q.out @@ -34,18 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. The join key matches the skewed key set. --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. The join key matches the skewed key set. --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt13.q.out b/ql/src/test/results/clientpositive/skewjoinopt13.q.out index 9b34e3f..4d2a474 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt13.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt13.q.out @@ -48,27 +48,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This test is for skewed join compile time optimization for more than 2 tables. 
--- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Table 3 is skewed, but since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key join T3 c on a.val = c.val PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Table 3 is skewed, but since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/skewjoinopt14.q.out b/ql/src/test/results/clientpositive/skewjoinopt14.q.out index a412b5d..e5f8905 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt14.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt14.q.out @@ -50,29 +50,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Tables 1 and 3 are skewed. 
Since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not enabled for table 3, but it is used for the first join between --- tables 1 and 2 --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key join T3 c on a.val = c.val PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Tables 1 and 3 are skewed. Since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not enabled for table 3, but it is used for the first join between --- tables 1 and 2 --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/skewjoinopt16.q.out b/ql/src/test/results/clientpositive/skewjoinopt16.q.out index 4d388fc..b0b948c 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt16.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt16.q.out @@ -34,18 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the both the columns --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. 
Ths join is performed on the both the columns --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt17.q.out b/ql/src/test/results/clientpositive/skewjoinopt17.q.out index 8fb0885..a48f3f1 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt17.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt17.q.out @@ -34,22 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the first skewed column --- The skewed value for the jon key is common to both the tables. --- In this case, the skewed join value is not repeated in the filter. --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the first skewed column --- The skewed value for the jon key is common to both the tables. --- In this case, the skewed join value is not repeated in the filter. --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -258,18 +246,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. 
Ths join is performed on the both the columns --- In this case, the skewed join value is repeated in the filter. - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the both the columns --- In this case, the skewed join value is repeated in the filter. - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt18.q.out b/ql/src/test/results/clientpositive/skewjoinopt18.q.out index f3996cc..6090a4f 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt18.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt18.q.out @@ -14,13 +14,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmp POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tmpt1 -PREHOOK: query: -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +PREHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +POSTHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 @@ -34,18 +32,12 @@ POSTHOOK: Input: default@tmpt1 POSTHOOK: Output: default@t1 POSTHOOK: Lineage: t1.key EXPRESSION [(tmpt1)tmpt1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t1.val SIMPLE [(tmpt1)tmpt1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- Tke skewed column is same in 
both the tables, however it is --- INT in one of the tables, and STRING in the other table - -CREATE TABLE T2(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T2 -POSTHOOK: query: -- Tke skewed column is same in both the tables, however it is --- INT in one of the tables, and STRING in the other table - -CREATE TABLE T2(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -58,20 +50,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- Once HIVE-3445 is fixed, the compile time skew join optimization would be --- applicable here. Till the above jira is fixed, it would be performed as a --- regular join --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Once HIVE-3445 is fixed, the compile time skew join optimization would be --- applicable here. 
Till the above jira is fixed, it would be performed as a --- regular join --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt19.q.out b/ql/src/test/results/clientpositive/skewjoinopt19.q.out index 11315a7..370c865 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt19.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt19.q.out @@ -34,20 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- add a test where the skewed key is also the bucketized key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- add a test where the skewed key is also the bucketized key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt2.q.out b/ql/src/test/results/clientpositive/skewjoinopt2.q.out index 860687d..73e1f0c 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt2.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt2.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) SKEWED BY (key) ON ((2), (7)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: 
Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) SKEWED BY (key) ON ((2), (7)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -38,22 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple query with skew on both the tables on the join key --- multiple skew values are present for the skewed keys --- but the skewed values do not overlap. --- The join values are a superset of the skewed keys. --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- a simple query with skew on both the tables on the join key --- multiple skew values are present for the skewed keys --- but the skewed values do not overlap. --- The join values are a superset of the skewed keys. 
--- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -203,14 +187,10 @@ POSTHOOK: Input: default@t2 3 13 3 13 8 18 8 18 8 18 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -364,14 +344,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 18 8 18 8 28 NULL NULL -PREHOOK: query: -- a group by at the end should not change anything - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key PREHOOK: type: QUERY -POSTHOOK: query: -- a group by at the end should not change anything - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt20.q.out b/ql/src/test/results/clientpositive/skewjoinopt20.q.out index e7ef2a2..3af77fc 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt20.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt20.q.out @@ -34,20 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- add a test where the skewed key is also the bucketized/sorted key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN 
T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- add a test where the skewed key is also the bucketized/sorted key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt21.q.out b/ql/src/test/results/clientpositive/skewjoinopt21.q.out index a7a7007..a20be63 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt21.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt21.q.out @@ -34,10 +34,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple join query with skew on both the tables on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM (SELECT key as k, val as v FROM T1) a @@ -45,10 +42,7 @@ FROM (SELECT key as k, val as v FROM T2) b ON a.k = b.k PREHOOK: type: QUERY -POSTHOOK: query: -- a simple join query with skew on both the tables on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM (SELECT key as k, val as v FROM T1) a diff --git a/ql/src/test/results/clientpositive/skewjoinopt3.q.out b/ql/src/test/results/clientpositive/skewjoinopt3.q.out index 1b1c8f0..952a2bd 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt3.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt3.q.out @@ -34,20 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple query with skew on both the tables. 
One of the skewed --- value is common to both the tables. The skewed value should not be --- repeated in the filter. --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- a simple query with skew on both the tables. One of the skewed --- value is common to both the tables. The skewed value should not be --- repeated in the filter. --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -204,14 +194,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt4.q.out b/ql/src/test/results/clientpositive/skewjoinopt4.q.out index 865f8b9..a9de160 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt4.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt4.q.out @@ -32,18 +32,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- only of the tables of the join (the left table of the join) is skewed --- the skewed filter would still be applied to both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- only of the tables of the join (the left table of the join) is skewed --- the 
skewed filter would still be applied to both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -200,12 +192,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- the order of the join should not matter, just confirming -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- the order of the join should not matter, just confirming -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt5.q.out b/ql/src/test/results/clientpositive/skewjoinopt5.q.out index f6743f0..1f88812 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt5.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt5.q.out @@ -34,18 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the first skewed column --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. 
Ths join is performed on the first skewed column --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt6.q.out b/ql/src/test/results/clientpositive/skewjoinopt6.q.out index eb75df4..611552b 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt6.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt6.q.out @@ -34,20 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. The join key is a subset of the skewed key set: --- it only contains the first skewed key for both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. 
The join key is a subset of the skewed key set: --- it only contains the first skewed key for both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt7.q.out b/ql/src/test/results/clientpositive/skewjoinopt7.q.out index b68205b..e982bba 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt7.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt7.q.out @@ -50,20 +50,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. --- 2 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. 
--- 2 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt8.q.out b/ql/src/test/results/clientpositive/skewjoinopt8.q.out index 25f4971..caaeb58 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt8.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt8.q.out @@ -48,20 +48,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. --- 1 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. 
--- 1 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/skewjoinopt9.q.out b/ql/src/test/results/clientpositive/skewjoinopt9.q.out index 725e957..2af317e 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt9.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt9.q.out @@ -32,10 +32,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a union all --- adding a order by at the end to make the results deterministic -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select key, val from T1 @@ -44,10 +41,7 @@ select key, val from T1 ) subq1 join T2 b on subq1.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a union all --- adding a order by at the end to make the results deterministic -EXPLAIN +POSTHOOK: query: EXPLAIN select * from ( select key, val from T1 @@ -175,18 +169,14 @@ POSTHOOK: Input: default@t2 8 28 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a group by -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select key, count(1) as cnt from T1 group by key ) subq1 join T2 b on subq1.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a group by -EXPLAIN +POSTHOOK: query: EXPLAIN 
select * from ( select key, count(1) as cnt from T1 group by key diff --git a/ql/src/test/results/clientpositive/smb_mapjoin9.q.out b/ql/src/test/results/clientpositive/smb_mapjoin9.q.out index 45b1884..25fd23f 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin9.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin9.q.out @@ -14,15 +14,13 @@ POSTHOOK: query: create table hive_test_smb_bucket2 (key int, value string) part POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@hive_test_smb_bucket2 -PREHOOK: query: -- empty partitions (HIVE-3205) -explain extended +PREHOOK: query: explain extended SELECT /* + MAPJOIN(b) */ b.key as k1, b.value, b.ds, a.key as k2 FROM hive_test_smb_bucket1 a JOIN hive_test_smb_bucket2 b ON a.key = b.key WHERE a.ds = '2010-10-15' and b.ds='2010-10-15' and b.key IS NOT NULL PREHOOK: type: QUERY -POSTHOOK: query: -- empty partitions (HIVE-3205) -explain extended +POSTHOOK: query: explain extended SELECT /* + MAPJOIN(b) */ b.key as k1, b.value, b.ds, a.key as k2 FROM hive_test_smb_bucket1 a JOIN hive_test_smb_bucket2 b diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out index 64cafbe..40df1c3 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out @@ -46,14 +46,10 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git 
a/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out index 85b8a6e..50706f4 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out @@ -20,15 +20,11 @@ POSTHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '2') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: default@tmp_smb_bucket_10 POSTHOOK: Output: default@tmp_smb_bucket_10@ds=2 -PREHOOK: query: -- add dummy files to make sure that the number of files in each partition is same as number of buckets - -load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1') +PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1') PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tmp_smb_bucket_10@ds=1 -POSTHOOK: query: -- add dummy files to make sure that the number of files in each partition is same as number of buckets - -load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1') +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1') POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tmp_smb_bucket_10@ds=1 diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out index 30dbc70..abc67e6 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- This test verifies that the output of a sort merge join on 2 partitions (one on each side of the join) is bucketed - --- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 
BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- This test verifies that the output of a sort merge join on 2 partitions (one on each side of the join) is bucketed - --- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -38,22 +32,18 @@ POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSch POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table3 -POSTHOOK: query: -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -2146,16 +2136,14 @@ POSTHOOK: Input: default@test_table3@ds=1 65 val_65 1 33 val_33 1 17 val_17 1 -PREHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +PREHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY PREHOOK: Input: default@test_table1 PREHOOK: Input: default@test_table1@ds=1 PREHOOK: Input: default@test_table3 PREHOOK: Input: default@test_table3@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +POSTHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' 
AND b.ds='1' POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table1 POSTHOOK: Input: default@test_table1@ds=1 diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out index 519faf6..29c1c24 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- This test verifies that the output of a sort merge join on 1 big partition with multiple small partitions is bucketed and sorted - --- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- This test verifies that the output of a sort merge join on 1 big partition with multiple small partitions is bucketed and sorted - --- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -50,22 +44,18 @@ POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSch POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=3).value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ] -PREHOOK: query: -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table3 -POSTHOOK: query: -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -233,16 +223,14 @@ POSTHOOK: Input: default@test_table2@ds=3 POSTHOOK: Output: default@test_table3@ds=1 POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table2)b.FieldSchema(name:value, type:string, 
comment:null), ] -PREHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +PREHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY PREHOOK: Input: default@test_table1 PREHOOK: Input: default@test_table1@ds=1 PREHOOK: Input: default@test_table3 PREHOOK: Input: default@test_table3@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +POSTHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table1 POSTHOOK: Input: default@test_table1@ds=1 @@ -250,13 +238,11 @@ POSTHOOK: Input: default@test_table3 POSTHOOK: Input: default@test_table3@ds=1 #### A masked pattern was here #### 879 -PREHOOK: query: -- Join data from the sampled buckets of 2 tables to verify the data is bucketed and sorted -explain extended +PREHOOK: query: explain extended INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY -POSTHOOK: query: -- Join data from the sampled buckets of 2 tables to verify the data is bucketed and sorted -explain extended +POSTHOOK: query: explain extended INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN 
test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out index f0f8af8..ad6bfbf 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- This test verifies that the sort merge join optimizer works when the tables are joined on columns with different names - --- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- This test verifies that the sort merge join optimizer works when the tables are joined on columns with different names - --- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -66,14 +60,10 @@ POSTHOOK: Lineage: test_table3.key EXPRESSION [(src)src.FieldSchema(name:key, ty POSTHOOK: Lineage: test_table3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Join data from 2 tables on their respective sorted columns (one each, with different names) and --- verify sort merge join is used -EXPLAIN EXTENDED 
+PREHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Join data from 2 tables on their respective sorted columns (one each, with different names) and --- verify sort merge join is used -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -221,14 +211,10 @@ POSTHOOK: Input: default@test_table2 0 val_0 0 val_0 0 val_0 0 val_0 2 val_2 2 val_2 -PREHOOK: query: -- Join data from 2 tables on their respective columns (two each, with the same names but sorted --- with different priorities) and verify sort merge join is not used -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Join data from 2 tables on their respective columns (two each, with the same names but sorted --- with different priorities) and verify sort merge join is not used -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_16.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_16.q.out index f4d42a6..ab2b323 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_16.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_16.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default 
PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -34,12 +32,10 @@ POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, ty POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Mapjoin followed by a aggregation should be performed in a single MR job -EXPLAIN +PREHOOK: query: EXPLAIN SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Mapjoin followed by a aggregation should be performed in a single MR job -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out index 17ea01f..7840905 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out @@ -46,14 +46,10 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: 
query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out index 0ad9a12..58c382f 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_20.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key int, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key int, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key int, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key int, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,15 +30,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1@ds=1 POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- with different datatypes. 
This should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- with different datatypes. This should be a map-reduce operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -151,15 +145,11 @@ CLUSTERED BY (value1) SORTED BY (value1) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, although the bucketing positions dont match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.value, a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, although the bucketing positions dont match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.value, a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -271,15 +261,11 @@ POSTHOOK: Input: default@test_table3 POSTHOOK: Input: default@test_table3@ds=1 #### A masked pattern was here #### 253 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- However, since an expression is being selected, it should involve a reducer -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT a.key+a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data 
into the bucketed table by selecting from another bucketed table --- However, since an expression is being selected, it should involve a reducer -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT a.key+a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out index 319fef3..c0fdfd3 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_21.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,15 +30,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1@ds=1 POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY 
-POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -102,15 +96,11 @@ CLUSTERED BY (key) SORTED BY (key desc) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort orders does not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort orders does not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -183,15 +173,11 @@ CLUSTERED BY (key) SORTED BY (key, value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort columns do not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort columns do not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM 
test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -263,15 +249,11 @@ CLUSTERED BY (key) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort columns do not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort columns do not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -344,15 +326,11 @@ CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the number of buckets do not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the number of buckets do not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -425,15 +403,11 @@ CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed 
table by selecting from another bucketed table --- This should be a map-reduce operation since sort columns do not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since sort columns do not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out index 8f1dbd6..36e8792 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_22.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,14 +30,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1 POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN 
INSERT OVERWRITE TABLE test_table2 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 SELECT * FROM test_table1 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN INSERT OVERWRITE TABLE test_table2 +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 SELECT * FROM test_table1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -176,14 +170,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1 POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN INSERT OVERWRITE TABLE test_table2 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 SELECT * FROM test_table1 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN INSERT OVERWRITE TABLE test_table2 +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 SELECT * FROM test_table1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out index aba899e..f72c2a7 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_25.q.out @@ -184,20 +184,10 @@ Warning: Map Join MAPJOIN[33][bigTable=?] in task 'Stage-6:MAPRED' is a cross pr Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product Warning: Map Join MAPJOIN[35][bigTable=?] in task 'Stage-9:MAPRED' is a cross product Warning: Map Join MAPJOIN[36][bigTable=?] 
in task 'Stage-10:MAPRED' is a cross product -PREHOOK: query: -- explain --- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value - --- select a.key from smb_bucket_1 a - -explain +PREHOOK: query: explain select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY -POSTHOOK: query: -- explain --- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value - --- select a.key from smb_bucket_1 a - -explain +POSTHOOK: query: explain select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out index d32d181..cda600b 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - - - -create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@smb_bucket_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - - - -create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +POSTHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY 
(key) INTO 1 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@smb_bucket_1 diff --git a/ql/src/test/results/clientpositive/sort.q.out b/ql/src/test/results/clientpositive/sort.q.out index 5fcf752..8683a52 100644 --- a/ql/src/test/results/clientpositive/sort.q.out +++ b/ql/src/test/results/clientpositive/sort.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.* FROM SRC x SORT BY key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.* FROM SRC x SORT BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_1.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_1.q.out index 25cbd68..c108fca 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_1.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_1.q.out @@ -42,16 +42,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@table_desc2 POSTHOOK: Lineage: table_desc2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: table_desc2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The columns of the tables above are sorted in same descending order. --- So, sort merge join should be performed - -explain +PREHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10 PREHOOK: type: QUERY -POSTHOOK: query: -- The columns of the tables above are sorted in same descending order. 
--- So, sort merge join should be performed - -explain +POSTHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out index eb29c86..0c64cbd 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_2.q.out @@ -46,19 +46,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@table_desc2 POSTHOOK: Lineage: table_desc2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: table_desc2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The columns of the tables above are sorted in same order. --- descending followed by descending --- So, sort merge join should be performed - -explain +PREHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key and a.value=b.value where a.key < 10 PREHOOK: type: QUERY -POSTHOOK: query: -- The columns of the tables above are sorted in same order. 
--- descending followed by descending --- So, sort merge join should be performed - -explain +POSTHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key and a.value=b.value where a.key < 10 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out index 599aabf..df743c0 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_3.q.out @@ -46,19 +46,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@table_desc2 POSTHOOK: Lineage: table_desc2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: table_desc2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The columns of the tables above are sorted in same orders. --- descending followed by ascending --- So, sort merge join should be performed - -explain +PREHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key and a.value=b.value where a.key < 10 PREHOOK: type: QUERY -POSTHOOK: query: -- The columns of the tables above are sorted in same orders. 
--- descending followed by ascending --- So, sort merge join should be performed - -explain +POSTHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key and a.value=b.value where a.key < 10 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out index 16fffcb..a807b8a 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_4.q.out @@ -46,17 +46,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@table_desc2 POSTHOOK: Lineage: table_desc2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: table_desc2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The columns of the tables above are sorted in different orders. --- So, sort merge join should not be performed - -explain +PREHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key and a.value=b.value where a.key < 10 PREHOOK: type: QUERY -POSTHOOK: query: -- The columns of the tables above are sorted in different orders. 
--- So, sort merge join should not be performed - -explain +POSTHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key and a.value=b.value where a.key < 10 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out index 687d892..3555e5e 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_5.q.out @@ -46,16 +46,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) SORTED POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The partition sorting metadata matches but the table metadata does not, sorted merge join should still be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' AND b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The partition sorting metadata matches but the table metadata does not, sorted merge join should still be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' AND b.part = '1' diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out index 06027d8..13d09a0 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_6.q.out @@ -46,16 +46,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) SORTED POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: 
default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The table sorting metadata matches but the partition metadata does not, sorted merge join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' AND b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The table sorting metadata matches but the partition metadata does not, sorted merge join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' AND b.part = '1' diff --git a/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out index 8bbcdd3..a11c65f 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_7.q.out @@ -82,16 +82,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key, value) POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The table sorting metadata matches but the partition metadata does not, sorted merge join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY -POSTHOOK: query: -- The table sorting metadata matches but the partition metadata does not, sorted merge join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL diff --git 
a/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out b/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out index 5f1b102..d9b88fd 100644 --- a/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out +++ b/ql/src/test/results/clientpositive/sort_merge_join_desc_8.q.out @@ -95,15 +95,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@table_desc4 POSTHOOK: Lineage: table_desc4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: table_desc4.value2 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- columns are sorted by one key in first table, two keys in second table but in same sort order for key. Hence SMB join should pass - -explain +PREHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10 PREHOOK: type: QUERY -POSTHOOK: query: -- columns are sorted by one key in first table, two keys in second table but in same sort order for key. Hence SMB join should pass - -explain +POSTHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc1 a join table_desc2 b on a.key=b.key where a.key < 10 POSTHOOK: type: QUERY @@ -165,15 +161,11 @@ POSTHOOK: Input: default@table_desc1 POSTHOOK: Input: default@table_desc2 #### A masked pattern was here #### 22 -PREHOOK: query: -- columns are sorted by 3 keys(a, b, c) in first table, two keys(a, c) in second table with same sort order. Hence SMB join should not pass - -explain +PREHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc3 a join table_desc4 b on a.key=b.key and a.value2=b.value2 where a.key < 10 PREHOOK: type: QUERY -POSTHOOK: query: -- columns are sorted by 3 keys(a, b, c) in first table, two keys(a, c) in second table with same sort order. 
Hence SMB join should not pass - -explain +POSTHOOK: query: explain select /*+ mapjoin(b) */ count(*) from table_desc3 a join table_desc4 b on a.key=b.key and a.value2=b.value2 where a.key < 10 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out b/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out index 0e6ac21..c452223 100644 --- a/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out +++ b/ql/src/test/results/clientpositive/spark/add_part_multiple.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- HIVE-5122 locations for 2nd, 3rd... partition are ignored - -CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@add_part_test -POSTHOOK: query: -- HIVE-5122 locations for 2nd, 3rd... partition are ignored - -CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE add_part_test (key STRING, value STRING) PARTITIONED BY (ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@add_part_test diff --git a/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out index 5a84f53..31ccc5c 100644 --- a/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out +++ b/ql/src/test/results/clientpositive/spark/annotate_stats_join.q.out @@ -118,41 +118,9 @@ POSTHOOK: query: analyze table loc compute statistics for columns state,locid,zi POSTHOOK: type: QUERY POSTHOOK: Input: default@loc #### A masked pattern was here #### -PREHOOK: query: -- number of rows --- emp - 48 --- dept - 6 --- loc - 8 - --- count distincts for relevant columns (since count distinct values are approximate in some cases count distint values will be 
greater than number of rows) --- emp.deptid - 3 --- emp.lastname - 6 --- emp.locid - 7 --- dept.deptid - 7 --- dept.deptname - 6 --- loc.locid - 7 --- loc.state - 6 - --- 2 relations, 1 attribute --- Expected output rows: (48*6)/max(3,7) = 41 -explain select * from emp e join dept d on (e.deptid = d.deptid) +PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) PREHOOK: type: QUERY -POSTHOOK: query: -- number of rows --- emp - 48 --- dept - 6 --- loc - 8 - --- count distincts for relevant columns (since count distinct values are approximate in some cases count distint values will be greater than number of rows) --- emp.deptid - 3 --- emp.lastname - 6 --- emp.locid - 7 --- dept.deptid - 7 --- dept.deptname - 6 --- loc.locid - 7 --- loc.state - 6 - --- 2 relations, 1 attribute --- Expected output rows: (48*6)/max(3,7) = 41 -explain select * from emp e join dept d on (e.deptid = d.deptid) +POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -225,13 +193,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 2 relations, 2 attributes --- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname +PREHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname PREHOOK: type: QUERY -POSTHOOK: query: -- 2 relations, 2 attributes --- Expected output rows: (48*6)/(max(3,7) * max(6,6)) = 6 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname +POSTHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -377,13 +341,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 2 relations, 3 attributes --- Expected output rows: (48*6)/(max(3,7) * 
max(6,6) * max(6,6)) = 1 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- 2 relations, 3 attributes --- Expected output rows: (48*6)/(max(3,7) * max(6,6) * max(6,6)) = 1 -explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp,dept where emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -455,13 +415,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 3 relations, 1 attribute --- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658 -explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid) +PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid) PREHOOK: type: QUERY -POSTHOOK: query: -- 3 relations, 1 attribute --- Expected output rows: (48*6*48)/top2largest(3,7,3) = 658 -explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid) +POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join emp e1 on (e.deptid = e1.deptid) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -554,11 +510,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Expected output rows: (48*6*8)/top2largest(3,7,7) = 47 -explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid) +PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid) PREHOOK: type: QUERY -POSTHOOK: query: -- Expected output 
rows: (48*6*8)/top2largest(3,7,7) = 47 -explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid) +POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid) join loc l on (e.deptid = l.locid) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -651,13 +605,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 3 relations and 2 attribute --- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1 -explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state) +PREHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state) PREHOOK: type: QUERY -POSTHOOK: query: -- 3 relations and 2 attribute --- Expected output rows: (48*6*8)/top2largest(3,7,7)*top2largest(6,6,6) = 1 -explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state) +POSTHOOK: query: explain select * from emp e join dept d on (e.deptid = d.deptid and e.lastname = d.deptname) join loc l on (e.deptid = l.locid and e.lastname = l.state) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -749,11 +699,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- left outer join -explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- left outer join -explain select * from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * 
from emp left outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -819,11 +767,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- left semi join -explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- left semi join -explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp left semi join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -900,11 +846,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- right outer join -explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- right outer join -explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp right outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -970,11 +914,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- full outer join -explain select * from emp full outer join dept on emp.deptid = 
dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +PREHOOK: query: explain select * from emp full outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname PREHOOK: type: QUERY -POSTHOOK: query: -- full outer join -explain select * from emp full outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname +POSTHOOK: query: explain select * from emp full outer join dept on emp.deptid = dept.deptid and emp.lastname = dept.deptname and dept.deptname = emp.lastname POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/auto_join0.q.out b/ql/src/test/results/clientpositive/spark/auto_join0.q.out index 5feb4fc..bc9c5db 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join0.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join0.q.out @@ -1,7 +1,5 @@ Warning: Map Join MAPJOIN[24][bigTable=?] 
in task 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select sum(hash(a.k1,a.v1,a.k2, a.v2)) from ( SELECT src1.key as k1, src1.value as v1, @@ -12,9 +10,7 @@ SELECT src1.key as k1, src1.value as v1, SORT BY k1, v1, k2, v2 ) a PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select sum(hash(a.k1,a.v1,a.k2, a.v2)) from ( SELECT src1.key as k1, src1.value as v1, diff --git a/ql/src/test/results/clientpositive/spark/auto_join1.q.out b/ql/src/test/results/clientpositive/spark/auto_join1.q.out index 154da7d..d9cd770 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join1.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/auto_join14.q.out b/ql/src/test/results/clientpositive/spark/auto_join14.q.out index 209eeeb..82deefe 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join14.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join14.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) 
- -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/auto_join21.q.out b/ql/src/test/results/clientpositive/spark/auto_join21.q.out index c0a1a4a..7dedc79 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join21.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join21.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/auto_join23.q.out b/ql/src/test/results/clientpositive/spark/auto_join23.q.out index 2fc07c0..7256539 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join23.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join23.q.out @@ -1,12 +1,8 @@ Warning: Map Join MAPJOIN[12][bigTable=?] 
in task 'Stage-1:MAPRED' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/auto_join26.q.out b/ql/src/test/results/clientpositive/spark/auto_join26.q.out index d750586..a0deaab 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join26.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join26.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, cnt INT) +PREHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, cnt INT) +POSTHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out b/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out index 2445f9d..83ea37b 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join_reordering_values.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- HIVE-5056 RS has expression list for values, but it's ignored in MapJoinProcessor - -create table testsrc ( `key` int,`val` string) +PREHOOK: query: create table testsrc ( `key` int,`val` string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@testsrc -POSTHOOK: query: -- 
HIVE-5056 RS has expression list for values, but it's ignored in MapJoinProcessor - -create table testsrc ( `key` int,`val` string) +POSTHOOK: query: create table testsrc ( `key` int,`val` string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@testsrc diff --git a/ql/src/test/results/clientpositive/spark/auto_join_stats.q.out b/ql/src/test/results/clientpositive/spark/auto_join_stats.q.out index 8441baa..db98adc 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join_stats.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join_stats.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Setting HTS(src2) < threshold < HTS(src2) + HTS(smalltable). --- This query plan should thus not try to combine the mapjoin into a single work. - -create table smalltable(key string, value string) stored as textfile +PREHOOK: query: create table smalltable(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@smalltable -POSTHOOK: query: -- Setting HTS(src2) < threshold < HTS(src2) + HTS(smalltable). --- This query plan should thus not try to combine the mapjoin into a single work. - -create table smalltable(key string, value string) stored as textfile +POSTHOOK: query: create table smalltable(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@smalltable diff --git a/ql/src/test/results/clientpositive/spark/auto_join_stats2.q.out b/ql/src/test/results/clientpositive/spark/auto_join_stats2.q.out index 8876acd..9064cbc 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join_stats2.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join_stats2.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Auto_join2 no longer tests merging the mapjoin work if big-table selection is based on stats, as src3 is smaller statistically than src1 + src2. 
--- Hence forcing the third table to be smaller. - -create table smalltable(key string, value string) stored as textfile +PREHOOK: query: create table smalltable(key string, value string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@smalltable -POSTHOOK: query: -- Auto_join2 no longer tests merging the mapjoin work if big-table selection is based on stats, as src3 is smaller statistically than src1 + src2. --- Hence forcing the third table to be smaller. - -create table smalltable(key string, value string) stored as textfile +POSTHOOK: query: create table smalltable(key string, value string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@smalltable diff --git a/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out b/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out index 00c29a9..d841f09 100644 --- a/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_join_without_localtask.q.out @@ -486,13 +486,11 @@ RUN: Stage-1:MAPRED 119 val_119 119 val_119 119 val_119 -PREHOOK: query: -- fallback to common join -select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 order by a.key, a.value limit 40 +PREHOOK: query: select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 order by a.key, a.value limit 40 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- fallback to common join -select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 order by a.key, a.value limit 40 +POSTHOOK: query: select a.* from src a join src b on a.key=b.key join src c on a.value=c.value where a.key>100 order by a.key, a.value limit 40 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked 
pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out index 0e99972..04fd977 100644 --- a/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_smb_mapjoin_14.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 @@ -42,14 +38,12 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. 
It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -129,8 +123,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select key, count(*) from @@ -140,8 +133,7 @@ select count(*) from group by key ) subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select key, count(*) from @@ -257,9 +249,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 6 -PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -276,9 +266,7 @@ join ) src2 on src1.key = src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -440,18 +428,14 @@ POSTHOOK: Input: default@tbl2 5 9 9 8 1 1 9 1 1 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. 
-explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -537,9 +521,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -551,9 +533,7 @@ select count(*) from join tbl2 b on subq2.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -654,9 +634,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -675,9 +653,7 @@ select count(*) from ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. 
--- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -797,20 +773,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. 
-explain +POSTHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join @@ -896,18 +866,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed -explain +PREHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed -explain +POSTHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join @@ -1018,16 +984,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- One of the tables is a sub-query and the other is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is a sub-query and the other is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -1107,9 +1069,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
--- It should be converted to to a sort-merge join -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1119,9 +1079,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1218,9 +1176,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 56 -PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1233,9 +1189,7 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1355,18 +1309,14 @@ POSTHOOK: query: CREATE TABLE dest2(key int, val1 string, val2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- The join is followed by a multi-table insert. 
It should be converted to --- a sort-merge join -explain +PREHOOK: query: explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 insert overwrite table dest1 select key, val1 insert overwrite table dest2 select key, val1, val2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is followed by a multi-table insert. It should be converted to --- a sort-merge join -explain +POSTHOOK: query: explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -1553,18 +1503,14 @@ POSTHOOK: query: CREATE TABLE dest2(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- The join is followed by a multi-table insert, and one of the inserts involves a reducer. --- It should be converted to a sort-merge join -explain +PREHOOK: query: explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 insert overwrite table dest1 select key, val1 insert overwrite table dest2 select key, count(*) group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is followed by a multi-table insert, and one of the inserts involves a reducer. 
--- It should be converted to a sort-merge join -explain +POSTHOOK: query: explain from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out index e2be217..99664db 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_1.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -103,11 +99,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not 
matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out index 65115ea..9c6bd7b 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_10.q.out @@ -38,8 +38,7 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -51,8 +50,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the subqueries contains a union, so it should not be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -178,16 +176,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 40 -PREHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. 
-explain +PREHOOK: query: explain select count(*) from (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the subqueries contains a groupby, so it should not be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, count(*) as value from tbl1 a where key < 6 group by a.key) subq1 join diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out index 09a6d1c..4bd1bc8 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_12.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 1 part, 2 bucket & big 2 part, 4 bucket - -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out index fb07771..549e494 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out +++ 
b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_13.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 @@ -54,8 +50,7 @@ POSTHOOK: query: CREATE TABLE dest2(k1 string, k2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- A SMB join followed by a mutli-insert -explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -63,8 +58,7 @@ from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -241,8 +235,7 @@ val_5 val_5 val_5 val_5 val_8 val_8 val_9 val_9 -PREHOOK: query: -- A SMB join followed by a mutli-insert -explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -250,8 +243,7 @@ from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value 
value2 FROM tbl1 a JOIN tbl2 b @@ -428,8 +420,7 @@ val_5 val_5 val_5 val_5 val_8 val_8 val_9 val_9 -PREHOOK: query: -- A SMB join followed by a mutli-insert -explain +PREHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b @@ -437,8 +428,7 @@ from ( INSERT OVERWRITE TABLE dest1 select key1, key2 INSERT OVERWRITE TABLE dest2 select value1, value2 PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join followed by a mutli-insert -explain +POSTHOOK: query: explain from ( SELECT a.key key1, a.value value1, b.key key2, b.value value2 FROM tbl1 a JOIN tbl2 b diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_14.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_14.q.out index 1d3dae8..8116593 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_14.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_14.q.out @@ -34,12 +34,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed -explain +PREHOOK: query: explain select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since tbl1 is the bigger table, tbl1 Left Outer Join tbl2 can be performed -explain +POSTHOOK: query: explain select count(*) FROM tbl1 a LEFT OUTER JOIN tbl2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -145,12 +143,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 
-PREHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed -explain +PREHOOK: query: explain select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since tbl2 is the bigger table, tbl1 Right Outer Join tbl2 can be performed -explain +POSTHOOK: query: explain select count(*) FROM tbl1 a RIGHT OUTER JOIN tbl2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_16.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_16.q.out index d4ecb19..cb8564f 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_16.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_16.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE stage_bucket_big +PREHOOK: query: CREATE TABLE stage_bucket_big ( key BIGINT, value STRING @@ -9,9 +7,7 @@ PARTITIONED BY (file_tag STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@stage_bucket_big -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE stage_bucket_big +POSTHOOK: query: CREATE TABLE stage_bucket_big ( key BIGINT, value STRING diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out index e09df8c..fae5dc6 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_3.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE 
PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 2 bucket & big 1 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -83,11 +81,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out index a59c8a4..9604abb 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_4.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds 
string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 4 bucket & big 1 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -99,11 +97,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-08 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out index 04e5f40..517535d 100644 --- 
a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_5.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket - --- SORT_QUERY_RESULTS - -CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small no part, 4 bucket & big no part, 2 bucket - --- SORT_QUERY_RESULTS - -CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -70,11 +62,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff 
--git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_6.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_6.q.out index 7aeef79..de08021 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_6.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_6.q.out @@ -70,23 +70,9 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl4 POSTHOOK: Lineage: tbl4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key - --- Three tests below are all the same query with different alias, which changes dispatch order of GenMapRedWalker --- This is dependent to iteration order of HashMap, so can be meaningless in non-sun jdk --- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] --- c = TS[1]-RS[7]-JOIN[8] --- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key - --- Three tests below are all the same query with different alias, which changes dispatch order of GenMapRedWalker --- This is dependent to iteration order of HashMap, so can be meaningless in non-sun jdk --- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] --- c = TS[1]-RS[7]-JOIN[8] --- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ 
-194,15 +180,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- d = TS[0]-RS[7]-JOIN[8]-SEL[9]-FS[10] --- b = TS[1]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8] --- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- d = TS[0]-RS[7]-JOIN[8]-SEL[9]-FS[10] --- b = TS[1]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8] --- a = TS[2]-MAPJOIN[11] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src d on d.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -310,15 +290,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] --- a = TS[1]-MAPJOIN[11] --- h = TS[2]-RS[7]-JOIN[8] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- b = TS[0]-OP[13]-MAPJOIN[11]-RS[6]-JOIN[8]-SEL[9]-FS[10] --- a = TS[1]-MAPJOIN[11] --- h = TS[2]-RS[7]-JOIN[8] -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src h on h.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -426,11 +400,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed 
table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -538,11 +510,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -619,11 +589,9 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl3 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed 
table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -731,11 +699,9 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl4 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -843,11 +809,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a non-bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join src c on c.key = a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ 
-955,11 +919,9 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on the same key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl3 c on c.key = a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1036,11 +998,9 @@ POSTHOOK: Input: default@tbl2 POSTHOOK: Input: default@tbl3 #### A masked pattern was here #### 2654 -PREHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +PREHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value PREHOOK: type: QUERY -POSTHOOK: query: -- A SMB join is being followed by a regular join on a bucketed table on a different key -explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value +POSTHOOK: query: explain select count(*) FROM tbl1 a JOIN tbl2 b ON a.key = b.key join tbl4 c on c.value = a.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out index 6f8307b..757237d 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out +++ 
b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_7.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 4 bucket & big 2 part, 2 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -116,11 +114,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket2outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git 
a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out index 51f71c0..4b6f052 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_8.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket_small -POSTHOOK: query: -- small 2 part, 2 bucket & big 2 part, 4 bucket -CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket_small (key string, value string) partitioned by (ds string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket_small @@ -116,11 +114,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcsortbucket4outof4.t POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@bucket_big@ds=2008-04-09 -PREHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since size is being used to find the big table, the order of the tables in the join does not matter -explain extended select 
count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_small a JOIN bucket_big b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -530,11 +526,9 @@ POSTHOOK: Input: default@bucket_small@ds=2008-04-08 POSTHOOK: Input: default@bucket_small@ds=2008-04-09 #### A masked pattern was here #### 76 -PREHOOK: query: -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +PREHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The mapjoin should fail resulting in the sort-merge join -explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key +POSTHOOK: query: explain extended select count(*) FROM bucket_big a JOIN bucket_small b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out index 6ea612a..2b09fcf 100644 --- a/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out +++ b/ql/src/test/results/clientpositive/spark/auto_sortmerge_join_9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: 
Output: database:default POSTHOOK: Output: default@tbl1 @@ -42,14 +38,12 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -129,16 +123,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select key, count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select key, count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key @@ -234,8 +226,7 @@ POSTHOOK: Input: default@tbl2 5 9 8 1 9 1 -PREHOOK: query: -- The join is being performed as part of more than one sub-query. 
It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select key, count(*) from @@ -245,8 +236,7 @@ select count(*) from group by key ) subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select key, count(*) from @@ -362,9 +352,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 6 -PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -381,9 +369,7 @@ join ) src2 on src1.key = src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -579,18 +565,14 @@ POSTHOOK: Input: default@tbl2 5 9 9 8 1 1 9 1 1 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. 
-explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -676,9 +658,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -690,9 +670,7 @@ select count(*) from join tbl2 b on subq2.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -793,9 +771,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -814,9 +790,7 @@ select count(*) from ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -936,20 +910,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. 
-explain +PREHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join @@ -1035,18 +1003,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized mapside --- join should be performed -explain +PREHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized mapside --- join should be performed -explain +POSTHOOK: query: explain select count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join @@ -1160,16 +1124,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The left table is a sub-query and the right table is not. --- It should be converted to a sort-merge join. 
-explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- The left table is a sub-query and the right table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -1249,17 +1209,13 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The right table is a sub-query and the left table is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from tbl1 a join (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 on a.key = subq1.key PREHOOK: type: QUERY -POSTHOOK: query: -- The right table is a sub-query and the left table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from tbl1 a join (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 @@ -1342,9 +1298,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1354,9 +1308,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
--- It should be converted to to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1453,9 +1405,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 56 -PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1468,9 +1418,7 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1574,14 +1522,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -1689,16 +1635,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The join is being performed as part of sub-query. 
It should be converted to a sort-merge join -explain +PREHOOK: query: explain select key, count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 group by key PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select key, count(*) from ( select a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key @@ -1822,8 +1766,7 @@ POSTHOOK: Input: default@tbl2 5 9 8 1 9 1 -PREHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select key, count(*) from @@ -1833,8 +1776,7 @@ select count(*) from group by key ) subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select key, count(*) from @@ -1978,9 +1920,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 6 -PREHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. -explain +PREHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -1997,9 +1937,7 @@ join ) src2 on src1.key = src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- A join is being performed across different sub-queries, where a join is being performed in each of them. --- Each sub-query should be converted to a sort-merge join. 
-explain +POSTHOOK: query: explain select src1.key, src1.cnt1, src2.cnt1 from ( select key, count(*) as cnt1 from @@ -2251,18 +2189,14 @@ POSTHOOK: Input: default@tbl2 5 9 9 8 1 1 9 1 1 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -2376,9 +2310,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -2390,9 +2322,7 @@ select count(*) from join tbl2 b on subq2.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -2521,9 +2451,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. 
--- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select * from @@ -2542,9 +2470,7 @@ select count(*) from ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the tables are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select * from @@ -2692,20 +2618,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +PREHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join @@ -2819,16 +2739,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The left table is a sub-query and the right table is not. --- It should be converted to a sort-merge join. 
-explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- The left table is a sub-query and the right table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -2936,17 +2852,13 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The right table is a sub-query and the left table is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select count(*) from tbl1 a join (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 on a.key = subq1.key PREHOOK: type: QUERY -POSTHOOK: query: -- The right table is a sub-query and the left table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select count(*) from tbl1 a join (select a.key as key, a.value as value from tbl2 a where key < 6) subq1 @@ -3057,9 +2969,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +PREHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -3069,9 +2979,7 @@ select count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
--- It should be converted to to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -3217,9 +3125,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 56 -PREHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -3232,9 +3138,7 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- The join is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select subq2.key as key, subq2.value as value1, b.value as value2 from ( diff --git a/ql/src/test/results/clientpositive/spark/avro_compression_enabled_native.q.out b/ql/src/test/results/clientpositive/spark/avro_compression_enabled_native.q.out index 687f17e..43cf190 100644 --- a/ql/src/test/results/clientpositive/spark/avro_compression_enabled_native.q.out +++ b/ql/src/test/results/clientpositive/spark/avro_compression_enabled_native.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 ( +PREHOOK: query: CREATE TABLE doctors4 ( number int, first_name string, last_name string, @@ -9,9 +7,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors4 -POSTHOOK: query: -- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 ( +POSTHOOK: query: CREATE TABLE doctors4 ( number int, first_name string, last_name string, diff --git 
a/ql/src/test/results/clientpositive/spark/avro_joins.q.out b/ql/src/test/results/clientpositive/spark/avro_joins.q.out index b9f233b..24a14d5 100644 --- a/ql/src/test/results/clientpositive/spark/avro_joins.q.out +++ b/ql/src/test/results/clientpositive/spark/avro_joins.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 +PREHOOK: query: CREATE TABLE doctors4 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS @@ -39,11 +35,7 @@ TBLPROPERTIES ('avro.schema.literal'='{ PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors4 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 +POSTHOOK: query: CREATE TABLE doctors4 ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' STORED AS diff --git a/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out b/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out index 09a1549..b2ece57 100644 --- a/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out +++ b/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 ( +PREHOOK: query: CREATE TABLE doctors4 ( number int COMMENT "Order of playing the role", first_name string COMMENT "first name of actor playing role", last_name string COMMENT "last name of actor playing role") @@ -10,11 +6,7 @@ STORED AS AVRO PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@doctors4 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- verify that new joins bring in correct schemas (including evolved schemas) - -CREATE TABLE doctors4 ( +POSTHOOK: query: CREATE TABLE doctors4 ( number int COMMENT 
"Order of playing the role", first_name string COMMENT "first name of actor playing role", last_name string COMMENT "last name of actor playing role") diff --git a/ql/src/test/results/clientpositive/spark/bucket2.q.out b/ql/src/test/results/clientpositive/spark/bucket2.q.out index dd23a25..efb4150 100644 --- a/ql/src/test/results/clientpositive/spark/bucket2.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket2_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE bucket2_1(key int, value string) CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket2_1 diff --git a/ql/src/test/results/clientpositive/spark/bucket3.q.out b/ql/src/test/results/clientpositive/spark/bucket3.q.out index f4acd71..e09a46b 100644 --- a/ql/src/test/results/clientpositive/spark/bucket3.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket3_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE bucket3_1(key int, value string) partitioned by (ds string) CLUSTERED BY 
(key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucket3_1 diff --git a/ql/src/test/results/clientpositive/spark/bucket5.q.out b/ql/src/test/results/clientpositive/spark/bucket5.q.out index b5d8890..3d67a8b 100644 --- a/ql/src/test/results/clientpositive/spark/bucket5.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket5.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Tests that when a multi insert inserts into a bucketed table and a table which is not bucketed --- the bucketed table is not merged and the table which is not bucketed is - -CREATE TABLE bucketed_table(key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE bucketed_table(key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucketed_table -POSTHOOK: query: -- Tests that when a multi insert inserts into a bucketed table and a table which is not bucketed --- the bucketed table is not merged and the table which is not bucketed is - -CREATE TABLE bucketed_table(key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE bucketed_table(key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@bucketed_table @@ -414,24 +408,20 @@ POSTHOOK: Input: default@bucketed_table 17 val_17 19 val_19 27 val_27 -PREHOOK: query: -- Should be 2 (not merged) -SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM bucketed_table +PREHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM bucketed_table PREHOOK: type: QUERY PREHOOK: Input: default@bucketed_table #### A masked pattern was here #### -POSTHOOK: query: -- Should be 2 (not merged) -SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM bucketed_table +POSTHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM 
bucketed_table POSTHOOK: type: QUERY POSTHOOK: Input: default@bucketed_table #### A masked pattern was here #### 2 -PREHOOK: query: -- Should be 1 (merged) -SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM unbucketed_table +PREHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM unbucketed_table PREHOOK: type: QUERY PREHOOK: Input: default@unbucketed_table #### A masked pattern was here #### -POSTHOOK: query: -- Should be 1 (merged) -SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM unbucketed_table +POSTHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM unbucketed_table POSTHOOK: type: QUERY POSTHOOK: Input: default@unbucketed_table #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out index 5bdec32..fa1409d 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_1.q.out @@ -42,18 +42,10 @@ POSTHOOK: query: load data local inpath '../../data/files/SortCol2Col1.txt' over POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@table2 -PREHOOK: query: -- The tables are bucketed in same columns in different order, --- but sorted in different column orders --- Neither bucketed map-join, nor sort-merge join should be performed - -explain extended +PREHOOK: query: explain extended select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value PREHOOK: type: QUERY -POSTHOOK: query: -- The tables are bucketed in same columns in different order, --- but sorted in different column orders --- Neither bucketed map-join, nor sort-merge join should be performed - -explain extended +POSTHOOK: query: explain extended select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git 
a/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out index 5ec1af9..cceec18 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_2.q.out @@ -42,18 +42,10 @@ POSTHOOK: query: load data local inpath '../../data/files/SortCol2Col1.txt' over POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@table2 -PREHOOK: query: -- The tables are bucketed in same columns in different order, --- but sorted in different column orders --- Neither bucketed map-join, nor sort-merge join should be performed - -explain extended +PREHOOK: query: explain extended select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value PREHOOK: type: QUERY -POSTHOOK: query: -- The tables are bucketed in same columns in different order, --- but sorted in different column orders --- Neither bucketed map-join, nor sort-merge join should be performed - -explain extended +POSTHOOK: query: explain extended select /*+ mapjoin(b) */ count(*) from table1 a join table2 b on a.key=b.key and a.value=b.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_spark4.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_spark4.q.out index 9cd87d2..97b666b 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_spark4.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_spark4.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE 
tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE tbl1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tbl1 diff --git a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out index 3003b74..08d115d 100644 --- a/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/spark/bucket_map_join_tez1.q.out @@ -632,16 +632,12 @@ POSTHOOK: Input: default@tab_part POSTHOOK: Input: default@tab_part@ds=2008-04-08 #### A masked pattern was here #### 1166 -PREHOOK: query: -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table. --- In this case the sub-query is chosen as the big table. -explain +PREHOOK: query: explain select a.k1, a.v1, b.value from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a join tab b on a.k1 = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- one side is really bucketed. srcbucket_mapjoin is not really a bucketed table. --- In this case the sub-query is chosen as the big table. 
-explain +POSTHOOK: query: explain select a.k1, a.v1, b.value from (select sum(substr(srcbucket_mapjoin.value,5)) as v1, key as k1 from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a join tab b on a.k1 = b.key @@ -1049,13 +1045,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- multi-way join -explain +PREHOOK: query: explain select a.key, a.value, b.value from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- multi-way join -explain +POSTHOOK: query: explain select a.key, a.value, b.value from tab_part a join tab b on a.key = b.key join tab c on a.key = c.key POSTHOOK: type: QUERY @@ -1268,14 +1262,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- in this case sub-query is the small table -explain +PREHOOK: query: explain select a.key, a.value, b.value from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a join tab_part b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- in this case sub-query is the small table -explain +POSTHOOK: query: explain select a.key, a.value, b.value from (select key, sum(substr(srcbucket_mapjoin.value,5)) as value from srcbucket_mapjoin GROUP BY srcbucket_mapjoin.key) a join tab_part b on a.key = b.key @@ -1478,13 +1470,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- join on non-bucketed column results in broadcast join. -explain +PREHOOK: query: explain select a.key, a.value, b.value from tab a join tab_part b on a.value = b.value PREHOOK: type: QUERY -POSTHOOK: query: -- join on non-bucketed column results in broadcast join. 
-explain +POSTHOOK: query: explain select a.key, a.value, b.value from tab a join tab_part b on a.value = b.value POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out b/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out index 518adca..c4c2bce 100644 --- a/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketizedhiveinputformat.q.out @@ -66,46 +66,38 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv2.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- 2 split by max.split.size -SELECT COUNT(1) FROM T2 +PREHOOK: query: SELECT COUNT(1) FROM T2 PREHOOK: type: QUERY PREHOOK: Input: default@t2 #### A masked pattern was here #### -POSTHOOK: query: -- 2 split by max.split.size -SELECT COUNT(1) FROM T2 +POSTHOOK: query: SELECT COUNT(1) FROM T2 POSTHOOK: type: QUERY POSTHOOK: Input: default@t2 #### A masked pattern was here #### 5000000 -PREHOOK: query: -- 1 split for two file -SELECT COUNT(1) FROM T3 +PREHOOK: query: SELECT COUNT(1) FROM T3 PREHOOK: type: QUERY PREHOOK: Input: default@t3 #### A masked pattern was here #### -POSTHOOK: query: -- 1 split for two file -SELECT COUNT(1) FROM T3 +POSTHOOK: query: SELECT COUNT(1) FROM T3 POSTHOOK: type: QUERY POSTHOOK: Input: default@t3 #### A masked pattern was here #### 1000 -PREHOOK: query: -- 1 split -SELECT COUNT(1) FROM T2 +PREHOOK: query: SELECT COUNT(1) FROM T2 PREHOOK: type: QUERY PREHOOK: Input: default@t2 #### A masked pattern was here #### -POSTHOOK: query: -- 1 split -SELECT COUNT(1) FROM T2 +POSTHOOK: query: SELECT COUNT(1) FROM T2 POSTHOOK: type: QUERY POSTHOOK: Input: default@t2 #### A masked pattern was here #### 5000000 -PREHOOK: query: -- 2 split for two file -SELECT COUNT(1) FROM T3 +PREHOOK: query: SELECT COUNT(1) FROM T3 PREHOOK: type: QUERY PREHOOK: Input: default@t3 #### A 
masked pattern was here #### -POSTHOOK: query: -- 2 split for two file -SELECT COUNT(1) FROM T3 +POSTHOOK: query: SELECT COUNT(1) FROM T3 POSTHOOK: type: QUERY POSTHOOK: Input: default@t3 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out index 888b43a..8cfaabd 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin1.q.out @@ -22,14 +22,12 @@ POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part_2 (key int, value string) p POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- empty partitions (HIVE-3205) -explain extended +PREHOOK: query: explain extended select /*+mapjoin(b)*/ a.key, a.value, b.value from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b on a.key=b.key where b.ds="2008-04-08" PREHOOK: type: QUERY -POSTHOOK: query: -- empty partitions (HIVE-3205) -explain extended +POSTHOOK: query: explain extended select /*+mapjoin(b)*/ a.key, a.value, b.value from srcbucket_mapjoin_part a join srcbucket_mapjoin_part_2 b on a.key=b.key where b.ds="2008-04-08" diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out index 772c534..871ec6f 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin10.q.out @@ -126,16 +126,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The table bucketing metadata matches but the partition metadata does not, bucket map join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED 
SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY -POSTHOOK: query: -- The table bucketing metadata matches but the partition metadata does not, bucket map join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out index 8633e2c..b6aa641 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin11.q.out @@ -134,18 +134,12 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=2 -PREHOOK: query: -- The table and partition bucketing metadata doesn't match but the bucket numbers of all partitions is --- a power of 2 and the bucketing columns match so bucket map join should be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL PREHOOK: type: QUERY -POSTHOOK: query: -- The table and partition bucketing metadata doesn't match but the bucket numbers of all partitions is --- a power of 2 and the bucketing columns match so bucket map join should be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part IS NOT NULL AND b.part IS NOT NULL diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out 
b/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out index a71f4ed..73ffacb 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin12.q.out @@ -95,16 +95,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_3 CLUSTERED BY (key) INTO 2 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_3 POSTHOOK: Output: default@srcbucket_mapjoin_part_3 -PREHOOK: query: -- The partition bucketing metadata match but one table is not bucketed, bucket map join should still be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The partition bucketing metadata match but one table is not bucketed, bucket map join should still be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' @@ -345,16 +341,12 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 #### A masked pattern was here #### 464 -PREHOOK: query: -- The table bucketing metadata match but one partition is not bucketed, bucket map join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_3 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The table bucketing metadata match but one partition is not bucketed, bucket map join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_3 b ON a.key = b.key AND a.part = '1' and b.part 
= '1' diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out index b4f4949..83a0c4a 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin13.q.out @@ -8,14 +8,12 @@ CLUSTERED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='1') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -30,14 +28,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (key) INTO 2 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_1 POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@srcbucket_mapjoin_part_1@part=2 -POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 PARTITION (part='2') +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_1 
PARTITION (part='2') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -54,30 +50,24 @@ CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_2 is bucketed by 'key' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +PREHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 -POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_2 is bucketed by 'key' -INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') +POSTHOOK: query: INSERT OVERWRITE TABLE srcbucket_mapjoin_part_2 PARTITION (part='1') SELECT * FROM src POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@srcbucket_mapjoin_part_2@part=1 POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: srcbucket_mapjoin_part_2 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value' --- and it is also being joined. So, bucketed map-join cannot be performed -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- part=1 partition for srcbucket_mapjoin_part_1 is bucketed by 'value' --- and it is also being joined. 
So, bucketed map-join cannot be performed -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key @@ -360,16 +350,12 @@ POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Input: default@srcbucket_mapjoin_part_2@part=1 #### A masked pattern was here #### 2056 -PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key and a.part = '2' PREHOOK: type: QUERY -POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key and a.part = '2' @@ -621,16 +607,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 drop partition (part = '1' POSTHOOK: type: ALTERTABLE_DROPPARTS POSTHOOK: Input: default@srcbucket_mapjoin_part_1 POSTHOOK: Output: default@srcbucket_mapjoin_part_1@part=1 -PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. 
So, bucketed map-join can be performed -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key @@ -882,18 +864,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_1 CLUSTERED BY (value) INTO POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_1 POSTHOOK: Output: default@srcbucket_mapjoin_part_1 -PREHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed --- The fact that the table is being bucketed by 'value' does not matter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- part=2 partition for srcbucket_mapjoin_part_1 is bucketed by 'key' --- and it is being joined. So, bucketed map-join can be performed --- The fact that the table is being bucketed by 'value' does not matter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out index 5c215eb..35c9c99 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin2.q.out @@ -794,13 +794,11 @@ POSTHOOK: Input: default@bucketmapjoin_hash_result_1 POSTHOOK: Input: default@bucketmapjoin_hash_result_2 #### A masked pattern was here #### 0 0 0 -PREHOOK: query: -- HIVE-3210 -load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') +PREHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 
partition(ds='2008-04-09') PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@srcbucket_mapjoin_part_2 -POSTHOOK: query: -- HIVE-3210 -load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') +POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO TABLE srcbucket_mapjoin_part_2 partition(ds='2008-04-09') POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@srcbucket_mapjoin_part_2 diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out index 198404b..c9b344e 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin6.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets +PREHOOK: query: create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets +POSTHOOK: query: create table tmp1 (a string, b string) clustered by (a) sorted by (a) into 10 buckets POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp1 diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out index 971b8fc..66cdcce 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin7.q.out @@ -52,17 +52,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/srcbucket21.txt' INTO POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: 
default@srcbucket_mapjoin_part_2@ds=2008-04-08/hr=0 -PREHOOK: query: -- Tests that bucket map join works with a table with more than one level of partitioning - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08' ORDER BY a.key, b.value LIMIT 1 PREHOOK: type: QUERY -POSTHOOK: query: -- Tests that bucket map join works with a table with more than one level of partitioning - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.ds = '2008-04-08' AND b.ds = '2008-04-08' diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out index 145f39e..de7d6fa 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin8.q.out @@ -60,16 +60,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 3 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The partition bucketing metadata match but the tables have different numbers of buckets, bucket map join should still be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The partition bucketing metadata match but the tables have different numbers of buckets, bucket map join should still be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key 
AND a.part = '1' and b.part = '1' @@ -319,16 +315,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (value) INTO POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The partition bucketing metadata match but the tables are bucketed on different columns, bucket map join should still be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The partition bucketing metadata match but the tables are bucketed on different columns, bucket map join should still be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out index 4f054f5..67365c3 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin9.q.out @@ -68,16 +68,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The table bucketing metadata matches but the partitions have different numbers of buckets, bucket map join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The table bucketing metadata matches but the partitions have 
different numbers of buckets, bucket map join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' and b.part = '1' @@ -349,16 +345,12 @@ POSTHOOK: query: ALTER TABLE srcbucket_mapjoin_part_2 CLUSTERED BY (key) INTO 2 POSTHOOK: type: ALTERTABLE_CLUSTER_SORT POSTHOOK: Input: default@srcbucket_mapjoin_part_2 POSTHOOK: Output: default@srcbucket_mapjoin_part_2 -PREHOOK: query: -- The table bucketing metadata matches but the partitions are bucketed on different columns, bucket map join should not be used - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' AND b.part = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- The table bucketing metadata matches but the partitions are bucketed on different columns, bucket map join should not be used - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+ MAPJOIN(b) */ count(*) FROM srcbucket_mapjoin_part_1 a JOIN srcbucket_mapjoin_part_2 b ON a.key = b.key AND a.part = '1' AND b.part = '1' diff --git a/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out b/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out index aafd18a..d7658e3 100644 --- a/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketmapjoin_negative3.q.out @@ -142,11 +142,9 @@ POSTHOOK: query: load data local inpath '../../data/files/srcbucket22.txt' INTO POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@test4 -PREHOOK: query: -- should be allowed -explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value +PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on 
L.key=R.key AND L.value=R.value PREHOOK: type: QUERY -POSTHOOK: query: -- should be allowed -explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value +POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key=R.key AND L.value=R.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -554,11 +552,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- should not apply bucket mapjoin -explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key +PREHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key PREHOOK: type: QUERY -POSTHOOK: query: -- should not apply bucket mapjoin -explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key +POSTHOOK: query: explain extended select /* + MAPJOIN(R) */ * from test1 L join test1 R on L.key+L.key=R.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out index 924afe9..81a064b 100644 --- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_2.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) 
PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -78,17 +76,13 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=2 POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -258,17 +252,13 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 9 val_9val_9 1 -PREHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected, --- it should be a map-reduce job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds is not null and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Since more than one partition of 'a' (the big table) is being selected, --- it should be a map-reduce job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -462,17 +452,13 @@ POSTHOOK: Input: 
default@test_table3@ds=1 5 val_5val_5 1 9 val_9val_9 1 9 val_9val_9 1 -PREHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only --- job even though multiple partitions of 'b' are being selected -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds is not null PREHOOK: type: QUERY -POSTHOOK: query: -- Since a single partition of the big table ('a') is being selected, it should be a map-only --- job even though multiple partitions of 'b' are being selected -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -666,8 +652,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 9 val_9val_9 1 9 val_9val_9 1 -PREHOOK: query: -- This should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -676,8 +661,7 @@ JOIN (select key, value from test_table2 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -856,8 +840,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 9 val_9val_9 1 -PREHOOK: query: -- This should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.v1, b.v2) FROM @@ -866,8 +849,7 @@ JOIN (select key, concat(value, value) as v2 from test_table2 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') 
SELECT a.key, concat(a.v1, b.v2) FROM @@ -1046,8 +1028,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5val_5val_5 1 5 val_5val_5val_5val_5 1 9 val_9val_9val_9val_9 1 -PREHOOK: query: -- This should be a map-reduce job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key+a.key, concat(a.value, b.value) FROM @@ -1056,8 +1037,7 @@ JOIN (select key, value from test_table2 where ds = '1') b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-reduce job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key+a.key, concat(a.value, b.value) FROM diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out index 02d4676..4245aa1 100644 --- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_4.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -54,17 +52,13 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] 
POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the insert is happening on the bucketing position -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the insert is happening on the bucketing position -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -245,17 +239,13 @@ CLUSTERED BY (value) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation, since the insert is happening on a non-bucketing position -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation, since the insert is happening on a non-bucketing position -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a JOIN test_table2 b diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out 
b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out index c33dee3..5e4e5ef 100644 --- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_6.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, key2 INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key, key2) SORTED BY (key ASC, key2 DESC) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -56,17 +54,13 @@ POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the sort-order matches -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.key2, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data 
into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the sort-order matches -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, a.key2, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -237,9 +231,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 6 val_5val_5 1 8 9 val_8val_8 1 9 10 val_9val_9 1 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the sort-order matches -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq1.key, subq1.key2, subq1.value from ( @@ -248,9 +240,7 @@ FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, since the sort-order matches -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq1.key, subq1.key2, subq1.value from ( @@ -430,17 +420,13 @@ POSTHOOK: Input: default@test_table3@ds=1 5 6 val_5val_5 1 8 9 val_8val_8 1 9 10 val_9val_9 1 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key2, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key2, a.key, concat(a.value, b.value) FROM 
test_table1 a JOIN test_table2 b @@ -546,9 +532,7 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq1.key2, subq1.key, subq1.value from ( @@ -557,9 +541,7 @@ FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq1.key2, subq1.key, subq1.value from ( @@ -668,9 +650,7 @@ STAGE PLANS: Stage: Stage-2 Stats-Aggr Operator -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq2.key, subq2.key2, subq2.value from ( @@ -682,9 +662,7 @@ ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 )subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq2.key, subq2.key2, subq2.value from ( @@ -873,9 +851,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 6 val_5val_5 1 8 9 val_8val_8 1 9 10 val_9val_9 1 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq2.k2, subq2.k1, subq2.value from ( @@ -887,9 +863,7 @@ ON a.key = b.key and a.key2 = 
b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 )subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT subq2.k2, subq2.k1, subq2.value from ( @@ -1088,9 +1062,7 @@ CLUSTERED BY (key, key2) SORTED BY (key DESC, key2 DESC) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table4 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table4 PARTITION (ds = '1') SELECT subq2.k2, subq2.k1, subq2.value from ( @@ -1102,9 +1074,7 @@ ON a.key = b.key and a.key2 = b.key2 WHERE a.ds = '1' and b.ds = '1' )subq1 )subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table4 PARTITION (ds = '1') SELECT subq2.k2, subq2.k1, subq2.value from ( diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out index 0b64a87..7c23da7 100644 --- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_7.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create 
two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -54,18 +52,14 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' and (a.key = 0 or a.key = 5) PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -234,8 +228,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 5 val_5val_5 1 -PREHOOK: query: -- This should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -244,8 +237,7 @@ JOIN (select key, value from test_table2 where ds = '1' and (key = 0 or key = 5)) b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -420,8 
+412,7 @@ POSTHOOK: Input: default@test_table3@ds=1 5 val_5val_5 1 5 val_5val_5 1 5 val_5val_5 1 -PREHOOK: query: -- This should be a map-only job -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM @@ -431,8 +422,7 @@ JOIN ON a.key = b.key WHERE a.key = 0 or a.key = 5 PREHOOK: type: QUERY -POSTHOOK: query: -- This should be a map-only job -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, concat(a.value, b.value) FROM diff --git a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out index be8fbfa..8263b6f 100644 --- a/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out +++ b/ql/src/test/results/clientpositive/spark/bucketsortoptimize_insert_8.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -54,17 +52,13 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table2@ds=1 POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] 
-PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, b.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.key, b.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b @@ -235,17 +229,13 @@ POSTHOOK: Input: default@test_table3@ds=1 5 5 val_5val_5 1 5 5 val_5val_5 1 9 9 val_9val_9 1 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT b.key, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b ON a.key = b.key WHERE a.ds = '1' and b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT b.key, a.key, concat(a.value, b.value) FROM test_table1 a JOIN test_table2 b diff --git a/ql/src/test/results/clientpositive/spark/cbo_gby.q.out b/ql/src/test/results/clientpositive/spark/cbo_gby.q.out index 04597a7..d1fe9e9 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_gby.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_gby.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 6. 
Test Select + TS + Join + Fil + GB + GB Having -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 6. Test Select + TS + Join + Fil + GB + GB Having -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/spark/cbo_gby_empty.q.out b/ql/src/test/results/clientpositive/spark/cbo_gby_empty.q.out index 68f0255..6970fd2 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_gby_empty.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_gby_empty.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- 21. Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +PREHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- 21. 
Test groupby is empty and there is no other cols in aggr -select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc +POSTHOOK: query: select unionsrc.key FROM (select 'tst1' as key, count(1) as value from src) unionsrc POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/cbo_limit.q.out b/ql/src/test/results/clientpositive/spark/cbo_limit.q.out index 13df214..c582578 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_limit.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_limit.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +PREHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 7. Test Select + TS + Join + Fil + GB + GB Having + Limit -select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 +POSTHOOK: query: select key, (c_int+1)+2 as x, sum(c_int) from cbo_t1 group by c_float, cbo_t1.c_int, key order by x limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/spark/cbo_semijoin.q.out b/ql/src/test/results/clientpositive/spark/cbo_semijoin.q.out index bdd8125..6f79549 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_semijoin.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_semijoin.q.out @@ -1,13 +1,11 @@ -PREHOOK: query: -- 12. 
SemiJoin -select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +PREHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 12. SemiJoin -select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key +POSTHOOK: query: select cbo_t1.c_int from cbo_t1 left semi join cbo_t2 on cbo_t1.key=cbo_t2.key POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/spark/cbo_simple_select.q.out b/ql/src/test/results/clientpositive/spark/cbo_simple_select.q.out index d161d9f..2e06e61 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_simple_select.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_simple_select.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- 1. Test Select + TS -select * from cbo_t1 +PREHOOK: query: select * from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 1. Test Select + TS -select * from cbo_t1 +POSTHOOK: query: select * from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -130,14 +128,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -PREHOOK: query: -- 2. Test Select + TS + FIL -select * from cbo_t1 where cbo_t1.c_int >= 0 +PREHOOK: query: select * from cbo_t1 where cbo_t1.c_int >= 0 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 2. 
Test Select + TS + FIL -select * from cbo_t1 where cbo_t1.c_int >= 0 +POSTHOOK: query: select * from cbo_t1 where cbo_t1.c_int >= 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -244,14 +240,12 @@ POSTHOOK: Input: default@cbo_t1@dt=2014 1 1 25.0 1 1 25.0 1 1 25.0 -PREHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +PREHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 3 Test Select + Select + TS + FIL -select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 +POSTHOOK: query: select * from (select * from cbo_t1 where cbo_t1.c_int >= 0) as cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 @@ -694,13 +688,11 @@ POSTHOOK: Input: default@cbo_t1@dt=2014 2.0 1 25.0 2.0 1 25.0 2.0 1 25.0 -PREHOOK: query: -- 13. null expr in select list -select null from cbo_t3 +PREHOOK: query: select null from cbo_t3 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t3 #### A masked pattern was here #### -POSTHOOK: query: -- 13. null expr in select list -select null from cbo_t3 +POSTHOOK: query: select null from cbo_t3 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t3 #### A masked pattern was here #### @@ -724,28 +716,24 @@ NULL NULL NULL NULL -PREHOOK: query: -- 14. unary operator -select key from cbo_t1 where c_int = -6 or c_int = +6 +PREHOOK: query: select key from cbo_t1 where c_int = -6 or c_int = +6 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 14. 
unary operator -select key from cbo_t1 where c_int = -6 or c_int = +6 +POSTHOOK: query: select key from cbo_t1 where c_int = -6 or c_int = +6 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -PREHOOK: query: -- 15. query referencing only partition columns -select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +PREHOOK: query: select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 15. query referencing only partition columns -select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' +POSTHOOK: query: select count(cbo_t1.dt) from cbo_t1 join cbo_t2 on cbo_t1.dt = cbo_t2.dt where cbo_t1.dt = '2014' POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/spark/cbo_stats.q.out b/ql/src/test/results/clientpositive/spark/cbo_stats.q.out index 554a8f0..3747d31 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_stats.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_stats.q.out @@ -1,12 +1,10 @@ -PREHOOK: query: -- 20. Test get stats with empty partition list -select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +PREHOOK: query: select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- 20. 
Test get stats with empty partition list -select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true +POSTHOOK: query: select cbo_t1.value from cbo_t1 join cbo_t2 on cbo_t1.key = cbo_t2.key where cbo_t1.dt = '10' and cbo_t1.c_boolean = true POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t2 diff --git a/ql/src/test/results/clientpositive/spark/cbo_subq_in.q.out b/ql/src/test/results/clientpositive/spark/cbo_subq_in.q.out index f6bfad2..39a4f8a 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_subq_in.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_subq_in.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- 17. SubQueries In --- non agg, non corr -select * +PREHOOK: query: select * from src_cbo where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 17. 
SubQueries In --- non agg, non corr -select * +POSTHOOK: query: select * from src_cbo where src_cbo.key in (select key from src_cbo s1 where s1.key > '9') order by key POSTHOOK: type: QUERY @@ -25,11 +21,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * +PREHOOK: query: select * from src_cbo b where b.key in (select distinct a.key @@ -39,11 +31,7 @@ where b.key in PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- agg, corr --- add back once rank issue fixed for cbo - --- distinct, corr -select * +POSTHOOK: query: select * from src_cbo b where b.key in (select distinct a.key @@ -64,8 +52,7 @@ POSTHOOK: Input: default@src_cbo 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey +PREHOOK: query: select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) @@ -73,8 +60,7 @@ where li.l_linenumber = 1 and PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey +POSTHOOK: query: select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) @@ -84,13 +70,7 @@ POSTHOOK: Input: default@lineitem #### A masked pattern was here #### 4297 1798 108570 8571 -PREHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- 
Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) +PREHOOK: query: select key, value, count(*) from src_cbo b where b.key in (select key from src_cbo where src_cbo.key > '8') group by key, value @@ -98,13 +78,7 @@ having count(*) in (select count(*) from src_cbo s1 where s1.key > '9' group by PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src_cbo (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src_cbo (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -select key, value, count(*) +POSTHOOK: query: select key, value, count(*) from src_cbo b where b.key in (select key from src_cbo where src_cbo.key > '8') group by key, value @@ -126,8 +100,7 @@ POSTHOOK: Input: default@src_cbo 96 val_96 1 97 val_97 2 98 val_98 2 -PREHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) +PREHOOK: query: select p_mfgr, p_name, avg(p_size) from part group by p_mfgr, p_name having p_name in @@ -135,8 +108,7 @@ having p_name in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, avg(p_size) +POSTHOOK: query: select p_mfgr, p_name, avg(p_size) from part group by p_mfgr, p_name having p_name in diff --git a/ql/src/test/results/clientpositive/spark/cbo_subq_not_in.q.out b/ql/src/test/results/clientpositive/spark/cbo_subq_not_in.q.out index c7274f7..c006d11 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_subq_not_in.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_subq_not_in.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 16. 
SubQueries Not In --- non agg, non corr -select * +PREHOOK: query: select * from src_cbo where src_cbo.key not in ( select key from src_cbo s1 @@ -9,9 +7,7 @@ where src_cbo.key not in PREHOOK: type: QUERY PREHOOK: Input: default@src_cbo #### A masked pattern was here #### -POSTHOOK: query: -- 16. SubQueries Not In --- non agg, non corr -select * +POSTHOOK: query: select * from src_cbo where src_cbo.key not in ( select key from src_cbo s1 @@ -139,8 +135,7 @@ POSTHOOK: Input: default@src_cbo 199 val_199 199 val_199 2 val_2 -PREHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size +PREHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name not in (select p_name @@ -150,8 +145,7 @@ where b.p_name not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- non agg, corr -select p_mfgr, b.p_name, p_size +POSTHOOK: query: select p_mfgr, b.p_name, p_size from part b where b.p_name not in (select p_name @@ -179,8 +173,7 @@ Manufacturer#4 almond antique violet mint lemon 39 Manufacturer#5 almond azure blanched chiffon midnight 23 Manufacturer#5 almond antique blue firebrick mint 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 -PREHOOK: query: -- agg, non corr -select p_name, p_size +PREHOOK: query: select p_name, p_size from part where part.p_size not in (select avg(p_size) @@ -190,8 +183,7 @@ part where part.p_size not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, non corr -select p_name, p_size +POSTHOOK: query: select p_name, p_size from part where part.p_size not in (select avg(p_size) @@ -227,8 +219,7 @@ almond aquamarine sandy cyan gainsboro 18 almond aquamarine yellow dodger mint 7 almond azure aquamarine papaya violet 12 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- agg, corr -select p_mfgr, p_name, p_size +PREHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in 
(select min(p_size) from (select p_mfgr, p_size from part) a @@ -237,8 +228,7 @@ from part b where b.p_size not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, corr -select p_mfgr, p_name, p_size +POSTHOOK: query: select p_mfgr, p_name, p_size from part b where b.p_size not in (select min(p_size) from (select p_mfgr, p_size from part) a @@ -267,8 +257,7 @@ Manufacturer#2 almond aquamarine rose maroon antique 25 Manufacturer#2 almond aquamarine sandy cyan gainsboro 18 Manufacturer#4 almond azure aquamarine papaya violet 12 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) +PREHOOK: query: select li.l_partkey, count(*) from lineitem li where li.l_linenumber = 1 and li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') @@ -276,8 +265,7 @@ group by li.l_partkey order by li.l_partkey PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, Group By in Parent Query -select li.l_partkey, count(*) +POSTHOOK: query: select li.l_partkey, count(*) from lineitem li where li.l_linenumber = 1 and li.l_orderkey not in (select l_orderkey from lineitem where l_shipmode = 'AIR') @@ -301,10 +289,7 @@ POSTHOOK: Input: default@lineitem 139636 1 175839 1 182052 1 -PREHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. - --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) +PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -316,10 +301,7 @@ having b.p_mfgr not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- add null check test from sq_notin.q once HIVE-7721 resolved. 
- --- non agg, corr, having -select b.p_mfgr, min(p_retailprice) +POSTHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -333,8 +315,7 @@ POSTHOOK: Input: default@part #### A masked pattern was here #### Manufacturer#1 1173.15 Manufacturer#2 1690.68 -PREHOOK: query: -- agg, non corr, having -select b.p_mfgr, min(p_retailprice) +PREHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in @@ -347,8 +328,7 @@ having b.p_mfgr not in PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- agg, non corr, having -select b.p_mfgr, min(p_retailprice) +POSTHOOK: query: select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr having b.p_mfgr not in diff --git a/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out b/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out index 156d02f..696d320 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_udf_udaf.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 8. Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +PREHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 8. 
Test UDF/UDAF -select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 +POSTHOOK: query: select count(*), count(c_int), sum(c_int), avg(c_int), max(c_int), min(c_int) from cbo_t1 POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/spark/cbo_union.q.out b/ql/src/test/results/clientpositive/spark/cbo_union.q.out index fb86d22..f6f36f6 100644 --- a/ql/src/test/results/clientpositive/spark/cbo_union.q.out +++ b/ql/src/test/results/clientpositive/spark/cbo_union.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 11. Union All -select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b +PREHOOK: query: select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b PREHOOK: type: QUERY PREHOOK: Input: default@cbo_t1 PREHOOK: Input: default@cbo_t1@dt=2014 PREHOOK: Input: default@cbo_t2 PREHOOK: Input: default@cbo_t2@dt=2014 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 11. 
Union All -select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b +POSTHOOK: query: select * from (select * from cbo_t1 order by key, c_boolean, value, dt)a union all select * from (select * from cbo_t2 order by key, c_boolean, value, dt)b POSTHOOK: type: QUERY POSTHOOK: Input: default@cbo_t1 POSTHOOK: Input: default@cbo_t1@dt=2014 diff --git a/ql/src/test/results/clientpositive/spark/column_access_stats.q.out b/ql/src/test/results/clientpositive/spark/column_access_stats.q.out index ed2cfbb..b1c494c 100644 --- a/ql/src/test/results/clientpositive/spark/column_access_stats.q.out +++ b/ql/src/test/results/clientpositive/spark/column_access_stats.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This test is used for testing the ColumnAccessAnalyzer - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 @@ -21,8 +18,7 @@ PREHOOK: query: CREATE TABLE T4(key STRING, val STRING) PARTITIONED BY (p STRING PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T4 -PREHOOK: query: -- Simple select queries -SELECT key FROM T1 +PREHOOK: query: SELECT key FROM T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### @@ -79,8 +75,7 @@ PREHOOK: Input: default@t4 Table:default@t4 Columns:p,val -PREHOOK: query: -- More complicated select queries -EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1 +PREHOOK: query: EXPLAIN SELECT key FROM (SELECT key, val FROM T1) subq1 PREHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -166,8 +161,7 @@ Columns:key,val 24.0 26.0 36.0 -PREHOOK: query: -- Work with union -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM ( SELECT key as c FROM T1 UNION ALL @@ -323,8 +317,7 @@ Columns:key 8 
8 8 -PREHOOK: query: -- Work with insert overwrite -FROM T1 +PREHOOK: query: FROM T1 INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key PREHOOK: type: QUERY @@ -334,8 +327,7 @@ PREHOOK: Output: default@t3 Table:default@t1 Columns:key,val -PREHOOK: query: -- Simple joins -SELECT * +PREHOOK: query: SELECT * FROM T1 JOIN T2 ON T1.key = T2.key PREHOOK: type: QUERY @@ -460,8 +452,7 @@ Columns:key,val Table:default@t2 Columns:key,val -PREHOOK: query: -- Map join -SELECT /*+ MAPJOIN(a) */ * +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY @@ -480,8 +471,7 @@ Columns:key,val 7 17 7 1 8 18 8 2 8 28 8 2 -PREHOOK: query: -- More joins -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM T1 JOIN T2 ON T1.key = T2.key AND T1.val = 3 and T2.val = 3 @@ -671,8 +661,7 @@ Columns:key,val Table:default@t2 Columns:key,val -PREHOOK: query: -- Join followed by join -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM ( @@ -829,8 +818,7 @@ Columns:key,val 7 7 17.0 8 8 46.0 8 8 46.0 -PREHOOK: query: -- for partitioned table -SELECT * FROM srcpart TABLESAMPLE (10 ROWS) +PREHOOK: query: SELECT * FROM srcpart TABLESAMPLE (10 ROWS) PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/spark/count.q.out b/ql/src/test/results/clientpositive/spark/count.q.out index 06f7235..eac2edd 100644 --- a/ql/src/test/results/clientpositive/spark/count.q.out +++ b/ql/src/test/results/clientpositive/spark/count.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -create table abcd (a int, b int, c int, d int) +PREHOOK: query: create table abcd (a int, b int, c int, d int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@abcd -POSTHOOK: query: -- SORT_QUERY_RESULTS -create table abcd (a int, b int, c int, d int) +POSTHOOK: query: create table abcd (a int, 
b int, c int, d int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@abcd @@ -288,11 +286,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@abcd #### A masked pattern was here #### 7 7 6 6 6 7 3 3 6 7 4 5 6 6 5 6 4 5 5 5 4 -PREHOOK: query: --first aggregation with literal. gbinfo was generating wrong expression -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd PREHOOK: type: QUERY -POSTHOOK: query: --first aggregation with literal. 
gbinfo was generating wrong expression -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -671,11 +667,9 @@ POSTHOOK: Input: default@abcd 1 1 1 1 1 1 -PREHOOK: query: --non distinct aggregate with same column as group by key -explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a +PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a PREHOOK: type: QUERY -POSTHOOK: query: --non distinct aggregate with same column as group by key -explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a +POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d), sum(d+d), sum(d*3), sum(b), sum(c), sum(a), sum(distinct a), sum(distinct b) from abcd group by a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -737,11 +731,9 @@ POSTHOOK: Input: default@abcd 100 1 1 3 6 9 100 10 100 100 100 
12 1 2 9 18 27 100 155 24 12 100 NULL 1 1 6 12 18 35 23 NULL NULL 35 -PREHOOK: query: --non distinct aggregate with same column as distinct aggregate -explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a +PREHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a PREHOOK: type: QUERY -POSTHOOK: query: --non distinct aggregate with same column as distinct aggregate -explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a +POSTHOOK: query: explain select a, count(distinct b), count(distinct c), sum(d), sum(c) from abcd group by a POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -803,11 +795,9 @@ POSTHOOK: Input: default@abcd 100 1 1 3 10 12 1 2 9 155 NULL 1 1 6 23 -PREHOOK: query: --aggregation with literal -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +PREHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd PREHOOK: type: QUERY -POSTHOOK: query: --aggregation with literal -explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct 
a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd +POSTHOOK: query: explain select count(1), count(*), count(a), count(b), count(c), count(d), count(distinct a), count(distinct b), count(distinct c), count(distinct d), count(distinct a,b), count(distinct b,c), count(distinct c,d), count(distinct a,d), count(distinct a,c), count(distinct b,d), count(distinct a,b,c), count(distinct b,c,d), count(distinct a,c,d), count(distinct a,b,d), count(distinct a,b,c,d) from abcd POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/cross_join.q.out b/ql/src/test/results/clientpositive/spark/cross_join.q.out index 5a921b4..270a6ae 100644 --- a/ql/src/test/results/clientpositive/spark/cross_join.q.out +++ b/ql/src/test/results/clientpositive/spark/cross_join.q.out @@ -1,9 +1,7 @@ Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product -PREHOOK: query: -- current -explain select src.key from src join src src2 +PREHOOK: query: explain select src.key from src join src src2 PREHOOK: type: QUERY -POSTHOOK: query: -- current -explain select src.key from src join src src2 +POSTHOOK: query: explain select src.key from src join src src2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -64,11 +62,9 @@ STAGE PLANS: ListSink Warning: Shuffle Join JOIN[6][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 2' is a cross product -PREHOOK: query: -- ansi cross join -explain select src.key from src cross join src src2 +PREHOOK: query: explain select src.key from src cross join src src2 PREHOOK: type: QUERY -POSTHOOK: query: -- ansi cross join -explain select src.key from src cross join src src2 +POSTHOOK: query: explain select src.key from src cross join src src2 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -128,11 +124,9 @@ STAGE PLANS: Processor 
Tree: ListSink -PREHOOK: query: -- appending condition is allowed -explain select src.key from src cross join src src2 on src.key=src2.key +PREHOOK: query: explain select src.key from src cross join src src2 on src.key=src2.key PREHOOK: type: QUERY -POSTHOOK: query: -- appending condition is allowed -explain select src.key from src cross join src src2 on src.key=src2.key +POSTHOOK: query: explain select src.key from src cross join src src2 on src.key=src2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out index cc78834..e21b7e6 100644 --- a/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/spark/cross_product_check_1.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@A -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +POSTHOOK: query: create table A as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out index 52b33a0..d66a5ba 100644 --- a/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/spark/cross_product_check_2.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +PREHOOK: query: create table A as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@A -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table A as +POSTHOOK: query: create table A as select * 
from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/spark/ctas.q.out b/ql/src/test/results/clientpositive/spark/ctas.q.out index a723365..4969930 100644 --- a/ql/src/test/results/clientpositive/spark/ctas.q.out +++ b/ql/src/test/results/clientpositive/spark/ctas.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) --- SORT_QUERY_RESULTS - -create table nzhang_Tmp(a int, b string) +PREHOOK: query: create table nzhang_Tmp(a int, b string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_Tmp -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) --- SORT_QUERY_RESULTS - -create table nzhang_Tmp(a int, b string) +POSTHOOK: query: create table nzhang_Tmp(a int, b string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@nzhang_Tmp diff --git a/ql/src/test/results/clientpositive/spark/custom_input_output_format.q.out b/ql/src/test/results/clientpositive/spark/custom_input_output_format.q.out index 662ed1a..edc972f 100644 --- a/ql/src/test/results/clientpositive/spark/custom_input_output_format.q.out +++ b/ql/src/test/results/clientpositive/spark/custom_input_output_format.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src1_rot13_iof(key STRING, value STRING) +PREHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat' OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src1_rot13_iof -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src1_rot13_iof(key STRING, value STRING) +POSTHOOK: query: CREATE TABLE src1_rot13_iof(key STRING, value STRING) STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.udf.Rot13InputFormat' OUTPUTFORMAT 
'org.apache.hadoop.hive.ql.io.udf.Rot13OutputFormat' POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/spark/date_join1.q.out b/ql/src/test/results/clientpositive/spark/date_join1.q.out index d551f5b..1aa9042 100644 --- a/ql/src/test/results/clientpositive/spark/date_join1.q.out +++ b/ql/src/test/results/clientpositive/spark/date_join1.q.out @@ -2,9 +2,7 @@ PREHOOK: query: drop table date_join1 PREHOOK: type: DROPTABLE POSTHOOK: query: drop table date_join1 POSTHOOK: type: DROPTABLE -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table date_join1 ( +PREHOOK: query: create table date_join1 ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, FL_DATE date, @@ -14,9 +12,7 @@ create table date_join1 ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_join1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table date_join1 ( +POSTHOOK: query: create table date_join1 ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, FL_DATE date, @@ -34,16 +30,14 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_join.txt' OVER POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@date_join1 -PREHOOK: query: -- Note that there are 2 rows with date 2000-11-28, so we should expect 4 rows with that date in the join results -select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date +PREHOOK: query: select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date from date_join1 t1 join date_join1 t2 on (t1.fl_date = t2.fl_date) PREHOOK: type: QUERY PREHOOK: Input: default@date_join1 #### A masked pattern was here #### -POSTHOOK: query: -- Note that there are 2 rows with date 2000-11-28, so we should expect 4 rows with that date in the join results -select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date +POSTHOOK: query: select t1.fl_num, t1.fl_date, t2.fl_num, t2.fl_date from date_join1 t1 join date_join1 t2 on (t1.fl_date = t2.fl_date) diff --git 
a/ql/src/test/results/clientpositive/spark/date_udf.q.out b/ql/src/test/results/clientpositive/spark/date_udf.q.out index 2037367..37ad29e 100644 --- a/ql/src/test/results/clientpositive/spark/date_udf.q.out +++ b/ql/src/test/results/clientpositive/spark/date_udf.q.out @@ -74,15 +74,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt.1' OV POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@date_udf_flight -PREHOOK: query: -- Test UDFs with date input -select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), +PREHOOK: query: select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), weekofyear(d), to_date(d) from date_udf PREHOOK: type: QUERY PREHOOK: Input: default@date_udf #### A masked pattern was here #### -POSTHOOK: query: -- Test UDFs with date input -select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), +POSTHOOK: query: select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), weekofyear(d), to_date(d) from date_udf POSTHOOK: type: QUERY @@ -115,15 +113,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf #### A masked pattern was here #### 0 3333 -3333 -3333 3333 -PREHOOK: query: -- Test UDFs with string input -select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), +PREHOOK: query: select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), weekofyear(d), to_date(d) from date_udf_string PREHOOK: type: QUERY PREHOOK: Input: default@date_udf_string #### A masked pattern was here #### -POSTHOOK: query: -- Test UDFs with string input -select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), +POSTHOOK: query: select unix_timestamp(d), year(d), month(d), day(d), dayofmonth(d), weekofyear(d), to_date(d) from date_udf_string POSTHOOK: type: QUERY @@ -195,8 +191,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf #### A masked pattern was here #### 1970-01-01 08:00:00 1969-12-31 16:00:00 2013-06-19 07:00:00 
2013-06-18 17:00:00 -PREHOOK: query: -- should all be true -select +PREHOOK: query: select to_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = to_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'), from_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = from_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'), to_utc_timestamp(date '2013-06-19', 'America/Los_Angeles') = to_utc_timestamp(timestamp('2013-06-19 00:00:00'), 'America/Los_Angeles'), @@ -205,8 +200,7 @@ select PREHOOK: type: QUERY PREHOOK: Input: default@date_udf #### A masked pattern was here #### -POSTHOOK: query: -- should all be true -select +POSTHOOK: query: select to_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = to_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'), from_utc_timestamp(date '1970-01-01', 'America/Los_Angeles') = from_utc_timestamp(timestamp('1970-01-01 00:00:00'), 'America/Los_Angeles'), to_utc_timestamp(date '2013-06-19', 'America/Los_Angeles') = to_utc_timestamp(timestamp('2013-06-19 00:00:00'), 'America/Los_Angeles'), @@ -216,13 +210,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf #### A masked pattern was here #### true true true true -PREHOOK: query: -- Aggregation functions (min/max) -select min(fl_date) from date_udf_flight +PREHOOK: query: select min(fl_date) from date_udf_flight PREHOOK: type: QUERY PREHOOK: Input: default@date_udf_flight #### A masked pattern was here #### -POSTHOOK: query: -- Aggregation functions (min/max) -select min(fl_date) from date_udf_flight +POSTHOOK: query: select min(fl_date) from date_udf_flight POSTHOOK: type: QUERY POSTHOOK: Input: default@date_udf_flight #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/decimal_join.q.out b/ql/src/test/results/clientpositive/spark/decimal_join.q.out index cc669a6..55bd03f 100644 --- a/ql/src/test/results/clientpositive/spark/decimal_join.q.out +++ 
b/ql/src/test/results/clientpositive/spark/decimal_join.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- HIVE-5292 Join on decimal columns fails --- SORT_QUERY_RESULTS - -create table src_dec (key decimal(3,0), value string) +PREHOOK: query: create table src_dec (key decimal(3,0), value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_dec -POSTHOOK: query: -- HIVE-5292 Join on decimal columns fails --- SORT_QUERY_RESULTS - -create table src_dec (key decimal(3,0), value string) +POSTHOOK: query: create table src_dec (key decimal(3,0), value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_dec diff --git a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out index 96e9205..126838e 100644 --- a/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out +++ b/ql/src/test/results/clientpositive/spark/dynamic_rdd_cache.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- JOIN TEST - -EXPLAIN +PREHOOK: query: EXPLAIN FROM (SELECT src.* FROM src sort by key) X RIGHT OUTER JOIN @@ -11,9 +9,7 @@ JOIN ON (X.key = Z.key) SELECT sum(hash(Y.key,Y.value)) GROUP BY Y.key PREHOOK: type: QUERY -POSTHOOK: query: -- JOIN TEST - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM (SELECT src.* FROM src sort by key) X RIGHT OUTER JOIN @@ -311,15 +307,11 @@ POSTHOOK: query: DROP TABLE dest2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@dest2 POSTHOOK: Output: default@dest2 -PREHOOK: query: -- UNION TEST - -CREATE TABLE tmptable(key STRING, value INT) +PREHOOK: query: CREATE TABLE tmptable(key STRING, value INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- UNION TEST - -CREATE TABLE tmptable(key STRING, value INT) +POSTHOOK: query: CREATE TABLE tmptable(key STRING, value INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@tmptable @@ -657,15 +649,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- CWE TEST - -CREATE TABLE inv(w_warehouse_name STRING , w_warehouse_sk INT , stdev INT , d_moy INT , mean INT , cov INT , inv_quantity_on_hand INT) +PREHOOK: query: CREATE TABLE inv(w_warehouse_name STRING , w_warehouse_sk INT , stdev INT , d_moy INT , mean INT , cov INT , inv_quantity_on_hand INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inv -POSTHOOK: query: -- CWE TEST - -CREATE TABLE inv(w_warehouse_name STRING , w_warehouse_sk INT , stdev INT , d_moy INT , mean INT , cov INT , inv_quantity_on_hand INT) +POSTHOOK: query: CREATE TABLE inv(w_warehouse_name STRING , w_warehouse_sk INT , stdev INT , d_moy INT , mean INT , cov INT , inv_quantity_on_hand INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inv diff --git a/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out b/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out index 1b14d88..1c30941 100644 --- a/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out +++ b/ql/src/test/results/clientpositive/spark/escape_clusterby1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- escaped column names in cluster by are not working jira 3267 -explain +PREHOOK: query: explain select key, value from src cluster by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- escaped column names in cluster by are not working jira 3267 -explain +POSTHOOK: query: explain select key, value from src cluster by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out b/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out index 3f54a0e..ddaa9bf 100644 --- a/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out +++ b/ql/src/test/results/clientpositive/spark/escape_distributeby1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- escaped 
column names in distribute by by are not working jira 3267 -explain +PREHOOK: query: explain select key, value from src distribute by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- escaped column names in distribute by by are not working jira 3267 -explain +POSTHOOK: query: explain select key, value from src distribute by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out b/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out index 3897082..83ebe1a 100644 --- a/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out +++ b/ql/src/test/results/clientpositive/spark/escape_orderby1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- escaped column names in order by are not working jira 3267 -explain +PREHOOK: query: explain select key, value from src order by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- escaped column names in order by are not working jira 3267 -explain +POSTHOOK: query: explain select key, value from src order by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out b/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out index ebaa5e1..dba5a7e 100644 --- a/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out +++ b/ql/src/test/results/clientpositive/spark/escape_sortby1.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- escaped column names in sort by are not working jira 3267 -explain +PREHOOK: query: explain select key, value from src sort by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- escaped column names in sort by are not working jira 3267 -explain +POSTHOOK: query: explain select key, value from src sort by key, value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out index 26401fb..30c20fa 100644 --- 
a/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out +++ b/ql/src/test/results/clientpositive/spark/filter_join_breaktask.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) +PREHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@filter_join_breaktask -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE filter_join_breaktask(key int, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@filter_join_breaktask diff --git a/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out b/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out index af85af9..6a2396e 100644 --- a/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out +++ b/ql/src/test/results/clientpositive/spark/filter_join_breaktask2.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +PREHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) +POSTHOOK: query: create table T1(c1 string, c2 string, c3 string, c4 string, c5 string, c6 string, c7 string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/spark/groupby1.q.out 
b/ql/src/test/results/clientpositive/spark/groupby1.q.out index 8f60691..42ce243 100644 --- a/ql/src/test/results/clientpositive/spark/groupby1.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g1 diff --git a/ql/src/test/results/clientpositive/spark/groupby10.q.out b/ql/src/test/results/clientpositive/spark/groupby10.q.out index 7b1e616..b572995 100644 --- a/ql/src/test/results/clientpositive/spark/groupby10.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby10.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, val1 INT, val2 INT) +PREHOOK: query: CREATE TABLE dest1(key INT, val1 INT, val2 INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, val1 INT, val2 INT) +POSTHOOK: query: CREATE TABLE dest1(key INT, val1 INT, val2 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 @@ -458,16 +454,12 @@ POSTHOOK: Input: default@dest2 66 66 66 86 86 86 98 98 98 -PREHOOK: query: -- HIVE-3852 Multi-groupby optimization fails when same distinct column is used twice or more - -EXPLAIN +PREHOOK: query: EXPLAIN FROM INPUT INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key INSERT OVERWRITE TABLE 
dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-3852 Multi-groupby optimization fails when same distinct column is used twice or more - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM INPUT INSERT OVERWRITE TABLE dest1 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), count(distinct substr(INPUT.value,5)) GROUP BY INPUT.key INSERT OVERWRITE TABLE dest2 SELECT INPUT.key, sum(distinct substr(INPUT.value,5)), avg(distinct substr(INPUT.value,5)) GROUP BY INPUT.key diff --git a/ql/src/test/results/clientpositive/spark/groupby11.q.out b/ql/src/test/results/clientpositive/spark/groupby11.q.out index eefbc91..a0f99c4 100644 --- a/ql/src/test/results/clientpositive/spark/groupby11.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby11.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string) +PREHOOK: query: CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string) +POSTHOOK: query: CREATE TABLE dest1(key STRING, val1 INT, val2 INT) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby1_map.q.out b/ql/src/test/results/clientpositive/spark/groupby1_map.q.out index d240c98..b414aa6 100644 --- a/ql/src/test/results/clientpositive/spark/groupby1_map.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby1_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE 
PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out b/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out index 0799ff5..a01cee1 100644 --- a/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby1_map_nomap.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out index 32355d4..f7b7f7a 100644 --- a/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby1_map_skew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE 
TABLE dest1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out index 4dfe32c..1b7e53b 100644 --- a/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby1_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g1(key INT, value DOUBLE) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g1 diff --git a/ql/src/test/results/clientpositive/spark/groupby2.q.out b/ql/src/test/results/clientpositive/spark/groupby2.q.out index cf01d51..a5cd0e6 100644 --- a/ql/src/test/results/clientpositive/spark/groupby2.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby2.q.out @@ -102,15 +102,11 @@ POSTHOOK: Output: default@dest_g2 POSTHOOK: Lineage: dest_g2.c1 EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest_g2.c2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest_g2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest_g2.* FROM dest_g2 +PREHOOK: query: SELECT dest_g2.* FROM dest_g2 PREHOOK: type: QUERY PREHOOK: Input: default@dest_g2 #### A masked pattern was here #### -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - -SELECT dest_g2.* FROM dest_g2 +POSTHOOK: query: SELECT dest_g2.* FROM dest_g2 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest_g2 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/groupby2_map.q.out b/ql/src/test/results/clientpositive/spark/groupby2_map.q.out index 40da0f3..d2b69af 100644 --- a/ql/src/test/results/clientpositive/spark/groupby2_map.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby2_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out index 5b9c8e4..4ad0568 100644 --- a/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby2_map_multi_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) 
STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 @@ -118,15 +114,11 @@ POSTHOOK: Input: default@dest1 7 6 7735.0 447 10 8 8 8762.0 595 10 9 7 91047.0 577 12 -PREHOOK: query: -- HIVE-5560 when group by key is used in distinct funtion, invalid result are returned - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY -POSTHOOK: query: -- HIVE-5560 when group by key is used in distinct funtion, invalid result are returned - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest1 SELECT substr(src.key,1,1), count(DISTINCT substr(src.key,1,1)), concat(substr(src.key,1,1),sum(substr(src.value,5))), sum(DISTINCT substr(src.value, 5)), count(src.value) GROUP BY substr(src.key,1,1) POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out index 13f8c18..8ecf769 100644 --- a/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby2_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git 
a/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out index 0613e73..3ede0fc 100644 --- a/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby2_noskew_multi_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git a/ql/src/test/results/clientpositive/spark/groupby4.q.out b/ql/src/test/results/clientpositive/spark/groupby4.q.out index b764466..3ad01d0 100644 --- a/ql/src/test/results/clientpositive/spark/groupby4.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby4.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out index 18ee5c7..04f58fa 
100644 --- a/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby4_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby5.q.out b/ql/src/test/results/clientpositive/spark/groupby5.q.out index d7d2140..41b46f8 100644 --- a/ql/src/test/results/clientpositive/spark/groupby5.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out index 5be29fe..ccfd32a 100644 --- a/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby5_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value 
STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby6_map.q.out b/ql/src/test/results/clientpositive/spark/groupby6_map.q.out index 77af675..03f68c6 100644 --- a/ql/src/test/results/clientpositive/spark/groupby6_map.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby6_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out index 527bf14..606b5d5 100644 --- a/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby6_map_skew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: 
Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out index 2d8fb74..eb72f01 100644 --- a/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby6_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby7.q.out b/ql/src/test/results/clientpositive/spark/groupby7.q.out index bb5f62c..ee0153a 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map.q.out index 3b5c22a..88b6304 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_map.q.out +++ 
b/ql/src/test/results/clientpositive/spark/groupby7_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out index fad6864..456dda1 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7_map_multi_single_reducer.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out index 3ba0022..7bf7bbd 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7_map_skew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) 
STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out index 8c985c5..89fea83 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out index 750a3fe..84948ac 100644 --- a/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby7_noskew_multi_single_reducer.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default 
PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby8.q.out b/ql/src/test/results/clientpositive/spark/groupby8.q.out index 7f48c5b..3158bc7 100644 --- a/ql/src/test/results/clientpositive/spark/groupby8.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby8.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby8_map.q.out b/ql/src/test/results/clientpositive/spark/groupby8_map.q.out index 87e08f1..662f505 100644 --- a/ql/src/test/results/clientpositive/spark/groupby8_map.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby8_map.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out b/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out index 295a93c..ac6a87b 100644 --- a/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby8_map_skew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out b/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out index 87e08f1..662f505 100644 --- a/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby8_noskew.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby9.q.out b/ql/src/test/results/clientpositive/spark/groupby9.q.out index b24afa3..96f812f 100644 --- 
a/ql/src/test/results/clientpositive/spark/groupby9.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out b/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out index 557f9e4..169ee04 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_complex_types.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out index 7fccb4f..7a730d2 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out +++ 
b/ql/src/test/results/clientpositive/spark/groupby_complex_types_multi_single_reducer.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key ARRAY, value BIGINT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out index 9bad0f6..56b8c2f 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_cube1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_grouping_id2.q.out b/ql/src/test/results/clientpositive/spark/groupby_grouping_id2.q.out index 20f2fb9..9c85852 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_grouping_id2.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_grouping_id2.q.out @@ -14,15 +14,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/groupby_groupingid.txt POSTHOOK: type: LOAD #### A masked pattern was 
here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP +PREHOOK: query: SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP +POSTHOOK: query: SELECT key, value, GROUPING__ID, count(*) from T1 GROUP BY key, value WITH ROLLUP POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out b/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out index beae497..ba79196 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_map_ppr.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out index 2ad4d68..3759157 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_map_ppr_multi_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - 
-CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, C3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out index 0241cd2..bf4132a 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_multi_insert_common_distinct.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table dest1(key int, cnt int) +PREHOOK: query: create table dest1(key int, cnt int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table dest1(key int, cnt int) +POSTHOOK: query: create table dest1(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out index fe3c5c2..c16df1b 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: 
CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out index 5d15040..37deb93 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out @@ -14,16 +14,12 @@ POSTHOOK: query: CREATE TABLE dest_g3(key STRING, c1 INT, c2 INT) STORED AS TEXT POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE dest_g2 SELECT substr(src.key,1,1), count(DISTINCT src.key) WHERE substr(src.key,1,1) >= 5 GROUP BY substr(src.key,1,1) INSERT OVERWRITE TABLE dest_g3 SELECT substr(src.key,1,1), count(DISTINCT src.key), count(src.value) WHERE substr(src.key,1,1) < 5 GROUP BY substr(src.key,1,1) diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out 
b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out index 982d719..b612747 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- HIVE-3849 Aliased column in where clause for multi-groupby single reducer cannot be resolved - --- SORT_QUERY_RESULTS - -create table e1 (key string, count int) +PREHOOK: query: create table e1 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@e1 -POSTHOOK: query: -- HIVE-3849 Aliased column in where clause for multi-groupby single reducer cannot be resolved - --- SORT_QUERY_RESULTS - -create table e1 (key string, count int) +POSTHOOK: query: create table e1 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_position.q.out b/ql/src/test/results/clientpositive/spark/groupby_position.q.out index cfb96c0..24be0dc 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_position.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_position.q.out @@ -14,16 +14,12 @@ POSTHOOK: query: CREATE TABLE testTable2(key INT, val1 STRING, val2 STRING) STOR POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@testTable2 -PREHOOK: query: -- Position Alias in GROUP BY and ORDER BY - -EXPLAIN +PREHOOK: query: EXPLAIN FROM SRC INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1 INSERT OVERWRITE TABLE testTable2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1, 2 PREHOOK: type: QUERY -POSTHOOK: query: -- Position Alias in GROUP BY and ORDER BY - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM SRC INSERT OVERWRITE TABLE testTable1 SELECT SRC.key, 
COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1 INSERT OVERWRITE TABLE testTable2 SELECT SRC.key, SRC.value, COUNT(DISTINCT SUBSTR(SRC.value,5)) WHERE SRC.key < 20 GROUP BY 1, 2 @@ -404,16 +400,12 @@ POSTHOOK: Input: default@testtable2 17 val_17 1 18 val_18 1 19 val_19 1 -PREHOOK: query: -- Position Alias in subquery - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT t.key, t.value FROM (SELECT b.key as key, count(1) as value FROM src b WHERE b.key <= 20 GROUP BY 1) t ORDER BY 2 DESC, 1 ASC PREHOOK: type: QUERY -POSTHOOK: query: -- Position Alias in subquery - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT t.key, t.value FROM (SELECT b.key as key, count(1) as value FROM src b WHERE b.key <= 20 GROUP BY 1) t ORDER BY 2 DESC, 1 ASC diff --git a/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out b/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out index f1e1027..bed6d47 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_ppr.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out index 7d2f9c3..418c2cc 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_ppr_multi_distinct.q.out @@ -1,12 
+1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key STRING, c1 INT, c2 STRING, c3 INT, c4 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out b/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out index fd6e423..6352c2b 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_resolution.q.out @@ -494,8 +494,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- windowing after group by -select key, count(*), rank() over(order by count(*)) +PREHOOK: query: select key, count(*), rank() over(order by count(*)) from src b where key < '12' group by b.key @@ -503,8 +502,7 @@ order by b.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- windowing after group by -select key, count(*), rank() over(order by count(*)) +POSTHOOK: query: select key, count(*), rank() over(order by count(*)) from src b where key < '12' group by b.key @@ -525,8 +523,7 @@ POSTHOOK: Input: default@src 116 1 1 118 2 7 119 3 12 -PREHOOK: query: -- having after group by -select key, count(*) +PREHOOK: query: select key, count(*) from src b group by b.key having key < '12' @@ -534,8 +531,7 @@ order by b.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- having after group by -select key, count(*) +POSTHOOK: 
query: select key, count(*) from src b group by b.key having key < '12' @@ -556,8 +552,7 @@ POSTHOOK: Input: default@src 116 1 118 2 119 3 -PREHOOK: query: -- having and windowing -select key, count(*), rank() over(order by count(*)) +PREHOOK: query: select key, count(*), rank() over(order by count(*)) from src b group by b.key having key < '12' @@ -565,8 +560,7 @@ order by b.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- having and windowing -select key, count(*), rank() over(order by count(*)) +POSTHOOK: query: select key, count(*), rank() over(order by count(*)) from src b group by b.key having key < '12' @@ -705,8 +699,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- order by -select key +PREHOOK: query: select key from src t where key < '12' group by t.key @@ -714,8 +707,7 @@ order by t.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- order by -select key +POSTHOOK: query: select key from src t where key < '12' group by t.key @@ -736,12 +728,10 @@ POSTHOOK: Input: default@src 116 118 119 -PREHOOK: query: -- cluster by -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.key, x.value as key FROM SRC x CLUSTER BY key PREHOOK: type: QUERY -POSTHOOK: query: -- cluster by -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.key, x.value as key FROM SRC x CLUSTER BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out index ce003b8..6d087b2 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_rollup1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out index 92ca67b..542650a 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_sort_1_23.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -22,13 +16,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: 
Output: default@t1 @@ -42,15 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key POSTHOOK: type: QUERY @@ -233,13 +221,11 @@ POSTHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val POSTHOOK: type: QUERY @@ -436,13 +422,11 @@ POSTHOOK: Input: default@outputtbl2 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key POSTHOOK: type: QUERY @@ -617,13 +601,11 
@@ POSTHOOK: Input: default@outputtbl1 3 1 7 1 8 2 -PREHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k POSTHOOK: type: QUERY @@ -806,15 +788,11 @@ POSTHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl3 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key POSTHOOK: type: QUERY @@ -998,13 +976,11 @@ POSTHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt in POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl4 -PREHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED +POSTHOOK: query: 
EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val POSTHOOK: type: QUERY @@ -1202,13 +1178,11 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 POSTHOOK: type: QUERY @@ -1404,21 +1378,13 @@ POSTHOOK: Input: default@outputtbl3 3 4 1 7 8 1 8 9 2 -PREHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key + key, sum(cnt) from (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 group by key + key PREHOOK: type: QUERY -POSTHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key + key, sum(cnt) from (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -1629,8 +1595,7 @@ POSTHOOK: Input: default@outputtbl1 2 1 4 1 6 1 -PREHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -1638,8 +1603,7 @@ SELECT key, count(1) FROM T1 GROUP BY key SELECT key, count(1) FROM T1 GROUP BY key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * 
FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -1935,8 +1899,7 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -1944,8 +1907,7 @@ SELECT key, count(1) FROM T1 GROUP BY key SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -2265,8 +2227,7 @@ POSTHOOK: Input: default@outputtbl1 6 1 7 1 8 2 -PREHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT subq1.key, subq1.cnt+subq2.cnt FROM (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -2274,8 +2235,7 @@ JOIN (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT subq1.key, subq1.cnt+subq2.cnt FROM (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -2570,16 +2530,14 @@ POSTHOOK: Input: default@outputtbl1 3 2 7 2 8 4 -PREHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM (SELECT key, count(1) FROM T1 GROUP BY key) subq1 JOIN (SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED +POSTHOOK: query: 
EXPLAIN EXTENDED SELECT * FROM (SELECT key, count(1) FROM T1 GROUP BY key) subq1 JOIN @@ -2830,25 +2788,21 @@ CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T2 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t2 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t2 POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T2 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T2 GROUP BY key POSTHOOK: type: QUERY @@ -3044,15 +2998,11 @@ POSTHOOK: Input: default@outputtbl1 3 1 7 1 8 2 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted 
keys -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val POSTHOOK: type: QUERY @@ -3238,15 +3188,11 @@ POSTHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 i POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl5 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 POSTHOOK: type: QUERY @@ -3427,15 +3373,13 @@ POSTHOOK: Input: default@outputtbl5 7 1 17 2 1 8 1 18 2 1 8 1 28 2 1 -PREHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T2)subq group by key, constant, val PREHOOK: type: QUERY -POSTHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T2)subq @@ -3619,8 +3563,7 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( @@ -3629,8 +3572,7 @@ SELECT key, 
constant as constant2, val, 2 as constant3 from )subq2 group by key, constant3, val PREHOOK: type: QUERY -POSTHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( @@ -4000,14 +3942,12 @@ POSTHOOK: Input: default@dest2 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- multi-table insert with a sub-query -EXPLAIN +PREHOOK: query: EXPLAIN FROM (select key, val from T2 where key = 8) x INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- multi-table insert with a sub-query -EXPLAIN +POSTHOOK: query: EXPLAIN FROM (select key, val from T2 where key = 8) x INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val diff --git a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out index f7f4dbb..932f343 100644 --- a/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out +++ b/ql/src/test/results/clientpositive/spark/groupby_sort_skew_1_23.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) --- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -22,13 +16,11 @@ POSTHOOK: 
query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE T1 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t1 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t1 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T1 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T1 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 @@ -42,15 +34,11 @@ POSTHOOK: query: CREATE TABLE outputTbl1(key int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl1 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key --- matches the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T1 GROUP BY key POSTHOOK: type: QUERY @@ -233,13 +221,11 @@ POSTHOOK: query: CREATE TABLE outputTbl2(key1 int, key2 string, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl2 -PREHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by even if the group by key is a superset of sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN 
EXTENDED INSERT OVERWRITE TABLE outputTbl2 SELECT key, val, count(1) FROM T1 GROUP BY key, val POSTHOOK: type: QUERY @@ -455,13 +441,11 @@ POSTHOOK: Input: default@outputtbl2 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key POSTHOOK: type: QUERY @@ -636,13 +620,11 @@ POSTHOOK: Input: default@outputtbl1 3 1 7 1 8 2 -PREHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k PREHOOK: type: QUERY -POSTHOOK: query: -- It should work for sub-queries with column aliases -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT k, count(1) FROM (SELECT key as k, val as v FROM T1) subq1 GROUP BY k POSTHOOK: type: QUERY @@ -825,15 +807,11 @@ POSTHOOK: query: CREATE TABLE outputTbl3(key1 int, key2 int, cnt int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl3 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT 1, key, count(1) FROM T1 GROUP BY 1, key PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant followed --- by a match to the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT 1, key, 
count(1) FROM T1 GROUP BY 1, key POSTHOOK: type: QUERY @@ -1017,13 +995,11 @@ POSTHOOK: query: CREATE TABLE outputTbl4(key1 int, key2 int, key3 string, cnt in POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl4 -PREHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a constant followed by another column -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T1 GROUP BY key, 1, val POSTHOOK: type: QUERY @@ -1240,13 +1216,11 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 PREHOOK: type: QUERY -POSTHOOK: query: -- no map-side group by if the group by key contains a function -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl3 SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 POSTHOOK: type: QUERY @@ -1461,21 +1435,13 @@ POSTHOOK: Input: default@outputtbl3 3 4 1 7 8 1 8 9 2 -PREHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT cast(key + key as string), sum(cnt) from (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 group by key + key PREHOOK: type: QUERY -POSTHOOK: query: -- it should not matter what follows the group by --- test various cases - --- group by followed by another group by -EXPLAIN EXTENDED 
+POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT cast(key + key as string), sum(cnt) from (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -1705,8 +1671,7 @@ POSTHOOK: Input: default@outputtbl1 2 1 4 1 6 1 -PREHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -1714,8 +1679,7 @@ SELECT key, count(1) FROM T1 GROUP BY key SELECT key, count(1) FROM T1 GROUP BY key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -2011,8 +1975,7 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -2020,8 +1983,7 @@ SELECT key, count(1) FROM T1 GROUP BY key SELECT cast(key + key as string) as key, count(1) FROM T1 GROUP BY key + key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a union where one of the sub-queries is map-side group by -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT * FROM ( SELECT key, count(1) FROM T1 GROUP BY key @@ -2360,8 +2322,7 @@ POSTHOOK: Input: default@outputtbl1 6 1 7 1 8 2 -PREHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT subq1.key, subq1.cnt+subq2.cnt FROM (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -2369,8 +2330,7 @@ JOIN (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join -EXPLAIN EXTENDED 
+POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT subq1.key, subq1.cnt+subq2.cnt FROM (SELECT key, count(1) as cnt FROM T1 GROUP BY key) subq1 @@ -2665,16 +2625,14 @@ POSTHOOK: Input: default@outputtbl1 3 2 7 2 8 4 -PREHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM (SELECT key, count(1) FROM T1 GROUP BY key) subq1 JOIN (SELECT key, val, count(1) FROM T1 GROUP BY key, val) subq2 ON subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- group by followed by a join where one of the sub-queries can be performed in the mapper -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM (SELECT key, count(1) FROM T1 GROUP BY key) subq1 JOIN @@ -2944,25 +2902,21 @@ CLUSTERED BY (key, val) SORTED BY (key, val) INTO 2 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T2 -PREHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 +PREHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Output: default@t2 -POSTHOOK: query: -- perform an insert to make sure there are 2 files -INSERT OVERWRITE TABLE T2 select key, val from T1 +POSTHOOK: query: INSERT OVERWRITE TABLE T2 select key, val from T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t2 POSTHOOK: Lineage: t2.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t2.val SIMPLE [(t1)t1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- no mapside sort group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T2 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- no mapside sort 
group by if the group by is a prefix of the sorted key -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl1 SELECT key, count(1) FROM T2 GROUP BY key POSTHOOK: type: QUERY @@ -3177,15 +3131,11 @@ POSTHOOK: Input: default@outputtbl1 3 1 7 1 8 2 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, 1, val, count(1) FROM T2 GROUP BY key, 1, val POSTHOOK: type: QUERY @@ -3371,15 +3321,11 @@ POSTHOOK: query: CREATE TABLE outputTbl5(key1 int, key2 int, key3 string, key4 i POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@outputTbl5 -PREHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 PREHOOK: type: QUERY -POSTHOOK: query: -- The plan should be converted to a map-side group by if the group by key contains a constant in between the --- sorted keys followed by anything -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl5 SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2 POSTHOOK: type: QUERY @@ -3560,15 +3506,13 @@ POSTHOOK: Input: default@outputtbl5 7 1 17 2 1 8 1 18 2 1 8 1 28 2 1 -PREHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE 
TABLE outputTbl4 SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T2)subq group by key, constant, val PREHOOK: type: QUERY -POSTHOOK: query: -- contants from sub-queries should work fine -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T2)subq @@ -3752,8 +3696,7 @@ POSTHOOK: Input: default@outputtbl4 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( @@ -3762,8 +3705,7 @@ SELECT key, constant as constant2, val, 2 as constant3 from )subq2 group by key, constant3, val PREHOOK: type: QUERY -POSTHOOK: query: -- multiple levels of contants from sub-queries should work fine -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE outputTbl4 select key, constant3, val, count(1) from ( @@ -4148,14 +4090,12 @@ POSTHOOK: Input: default@dest2 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- multi-table insert with a sub-query -EXPLAIN +PREHOOK: query: EXPLAIN FROM (select key, val from T2 where key = 8) x INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val PREHOOK: type: QUERY -POSTHOOK: query: -- multi-table insert with a sub-query -EXPLAIN +POSTHOOK: query: EXPLAIN FROM (select key, val from T2 where key = 8) x INSERT OVERWRITE TABLE DEST1 SELECT key, count(1) GROUP BY key INSERT OVERWRITE TABLE DEST2 SELECT key, val, count(1) GROUP BY key, val diff --git a/ql/src/test/results/clientpositive/spark/having.q.out b/ql/src/test/results/clientpositive/spark/having.q.out index cf44459..2876e67 100644 --- a/ql/src/test/results/clientpositive/spark/having.q.out +++ b/ql/src/test/results/clientpositive/spark/having.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- 
SORT_QUERY_RESULTS -EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +PREHOOK: query: EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS -EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 +POSTHOOK: query: EXPLAIN SELECT count(value) AS c FROM src GROUP BY key HAVING c > 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out b/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out index e4cf7f3..19454a8 100644 --- a/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out +++ b/ql/src/test/results/clientpositive/spark/index_auto_self_join.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - -EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +PREHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- try the query without indexing, with manual indexing, and with automatic indexing - -EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 +POSTHOOK: query: EXPLAIN SELECT a.key, b.key FROM src a JOIN src b ON (a.value = b.value) WHERE a.key > 80 AND a.key < 100 AND b.key > 70 AND b.key < 90 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out b/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out index b16989c..31c921c 100644 --- a/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out +++ 
b/ql/src/test/results/clientpositive/spark/index_bitmap3.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out b/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out index f9e8e3d..e3b205c 100644 --- a/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out +++ b/ql/src/test/results/clientpositive/spark/index_bitmap_auto.q.out @@ -1,28 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- try the query without indexing, with manual indexing, and with automatic indexing --- without indexing -SELECT key, value FROM src WHERE key=0 AND value = "val_0" +PREHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0" PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- try the query without indexing, with manual indexing, and with automatic indexing --- without indexing -SELECT key, value FROM src WHERE key=0 AND value = "val_0" +POSTHOOK: query: SELECT key, value FROM src WHERE key=0 AND value = "val_0" POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 0 val_0 0 val_0 0 val_0 -PREHOOK: query: -- create indices -EXPLAIN +PREHOOK: query: EXPLAIN CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD PREHOOK: type: CREATEINDEX -POSTHOOK: query: -- create indices -EXPLAIN +POSTHOOK: query: EXPLAIN CREATE INDEX src1_index ON TABLE src(key) as 'BITMAP' WITH DEFERRED REBUILD POSTHOOK: type: CREATEINDEX STAGE DEPENDENCIES: @@ -97,8 +87,7 @@ POSTHOOK: query: SELECT * FROM 
default__src_src2_index__ POSTHOOK: type: QUERY POSTHOOK: Input: default@default__src_src2_index__ #### A masked pattern was here #### -PREHOOK: query: -- manual indexing -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ WHERE key = 0) a @@ -109,8 +98,7 @@ FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bit a.bucketname = b.bucketname AND a.offset = b.offset WHERE NOT EWAH_BITMAP_EMPTY(EWAH_BITMAP_AND(a.bitmaps, b.bitmaps)) GROUP BY a.bucketname PREHOOK: type: QUERY -POSTHOOK: query: -- manual indexing -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.bucketname AS `_bucketname`, COLLECT_SET(a.offset) as `_offsets` FROM (SELECT `_bucketname` AS bucketname, `_offset` AS offset, `_bitmaps` AS bitmaps FROM default__src_src1_index__ WHERE key = 0) a diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out index 33d795b..1efb81b 100644 --- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out +++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_bucketed_table.q.out @@ -1,27 +1,19 @@ -PREHOOK: query: -- Test writing to a bucketed table, the output should be bucketed by the bucketing key into the --- a number of files equal to the number of buckets -CREATE TABLE test_table_bucketed (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table_bucketed (key STRING, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (value) SORTED BY (value) INTO 3 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table_bucketed -POSTHOOK: query: -- Test writing to a bucketed table, the output should be bucketed by the bucketing key into the --- a number 
of files equal to the number of buckets -CREATE TABLE test_table_bucketed (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE test_table_bucketed (key STRING, value STRING) PARTITIONED BY (part STRING) CLUSTERED BY (value) SORTED BY (value) INTO 3 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table_bucketed -PREHOOK: query: -- Despite the fact that normally inferring would say this table is bucketed and sorted on key, --- this should be bucketed and sorted by value into 3 buckets -INSERT OVERWRITE TABLE test_table_bucketed PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table_bucketed PARTITION (part = '1') SELECT key, count(1) FROM src GROUP BY KEY PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table_bucketed@part=1 -POSTHOOK: query: -- Despite the fact that normally inferring would say this table is bucketed and sorted on key, --- this should be bucketed and sorted by value into 3 buckets -INSERT OVERWRITE TABLE test_table_bucketed PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table_bucketed PARTITION (part = '1') SELECT key, count(1) FROM src GROUP BY KEY POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -67,16 +59,12 @@ Bucket Columns: [value] Sort Columns: [Order(col:value, order:1)] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, the table is --- bucketed -SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 1 OUT OF 3) WHERE part = '1' +PREHOOK: query: SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 1 OUT OF 3) WHERE part = '1' PREHOOK: type: QUERY PREHOOK: Input: default@test_table_bucketed PREHOOK: Input: default@test_table_bucketed@part=1 #### A masked pattern was here #### -POSTHOOK: query: -- If the count(*) from sampling the buckets matches the count(*) from each file, 
the table is --- bucketed -SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 1 OUT OF 3) WHERE part = '1' +POSTHOOK: query: SELECT COUNT(*) FROM test_table_bucketed TABLESAMPLE (BUCKET 1 OUT OF 3) WHERE part = '1' POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table_bucketed POSTHOOK: Input: default@test_table_bucketed@part=1 diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out index d690e00..3152663 100644 --- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out +++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_map_operators.q.out @@ -1,17 +1,9 @@ -PREHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata, in particular, this tests --- that operators in the mapper have no effect - -CREATE TABLE test_table1 (key STRING, value STRING) +PREHOOK: query: CREATE TABLE test_table1 (key STRING, value STRING) CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata, in particular, this tests --- that operators in the mapper have no effect - -CREATE TABLE test_table1 (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key STRING, value STRING) CLUSTERED BY (key) SORTED BY (key DESC) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -54,12 +46,10 @@ POSTHOOK: query: CREATE TABLE test_table_out (key STRING, value STRING) PARTITIO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table_out -PREHOOK: query: -- Test map group by doesn't affect inference, 
should not be bucketed or sorted -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT key, count(*) FROM test_table1 GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- Test map group by doesn't affect inference, should not be bucketed or sorted -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT key, count(*) FROM test_table1 GROUP BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -163,8 +153,7 @@ Sort Columns: [] Storage Desc Params: serialization.format 1 WARNING: Comparing a bigint and a string may result in a loss of precision. -PREHOOK: query: -- Test map group by doesn't affect inference, should be bucketed and sorted by value -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT a.key, a.value FROM ( SELECT key, count(*) AS value FROM test_table1 GROUP BY key ) a JOIN ( @@ -172,8 +161,7 @@ SELECT a.key, a.value FROM ( ) b ON (a.value = b.value) PREHOOK: type: QUERY -POSTHOOK: query: -- Test map group by doesn't affect inference, should be bucketed and sorted by value -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT a.key, a.value FROM ( SELECT key, count(*) AS value FROM test_table1 GROUP BY key ) a JOIN ( @@ -331,12 +319,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test SMB join doesn't affect inference, should not be bucketed or sorted -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1 a JOIN 
test_table2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Test SMB join doesn't affect inference, should not be bucketed or sorted -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT /*+ MAPJOIN(a) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -446,13 +432,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test SMB join doesn't affect inference, should be bucketed and sorted by key -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key GROUP BY b.value PREHOOK: type: QUERY -POSTHOOK: query: -- Test SMB join doesn't affect inference, should be bucketed and sorted by key -EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table_out PARTITION (part = '1') SELECT /*+ MAPJOIN(a) */ b.value, count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key GROUP BY b.value POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out index 34bf946..175ddd6 100644 --- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out +++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_merge.q.out @@ -1,27 +1,17 @@ -PREHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata. In particular, those cases --- where where merging may or may not be used. 
- -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table -POSTHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata. In particular, those cases --- where where merging may or may not be used. - -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table -PREHOOK: query: -- Tests a reduce task followed by a merge. The output should be neither bucketed nor sorted. -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Tests a reduce task followed by a merge. The output should be neither bucketed nor sorted. -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -67,14 +57,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Tests a reduce task followed by a move. The output should be bucketed and sorted. 
-INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Tests a reduce task followed by a move. The output should be bucketed and sorted. -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, b.value FROM src a JOIN src b ON a.key = b.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out index 216f4cf..13219ac 100644 --- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out +++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_num_buckets.q.out @@ -6,20 +6,7 @@ POSTHOOK: query: CREATE TABLE test_table (key INT, value STRING) PARTITIONED BY POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table -PREHOOK: query: -- Tests dynamic partitions where bucketing/sorting can be inferred, but not all reducers write --- all partitions. The subquery produces rows as follows --- key = 0: --- 0, , 0 --- key = 1: --- 0, , 1 --- key = 2: --- 1, , 0 --- This means that by distributing by the first column into two reducers, and using the third --- columns as a dynamic partition, the dynamic partition for 0 will get written in both reducers --- and the partition for 1 will get written in one reducer. So hr=0 should be bucketed by key --- and hr=1 should not. 
- -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr) SELECT key2, value, cast(hr as int) FROM (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr @@ -27,20 +14,7 @@ FROM srcpart WHERE ds = '2008-04-08') a DISTRIBUTE BY key2 PREHOOK: type: QUERY -POSTHOOK: query: -- Tests dynamic partitions where bucketing/sorting can be inferred, but not all reducers write --- all partitions. The subquery produces rows as follows --- key = 0: --- 0, , 0 --- key = 1: --- 0, , 1 --- key = 2: --- 1, , 0 --- This means that by distributing by the first column into two reducers, and using the third --- columns as a dynamic partition, the dynamic partition for 0 will get written in both reducers --- and the partition for 1 will get written in one reducer. So hr=0 should be bucketed by key --- and hr=1 should not. - -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table PARTITION (ds = '2008-04-08', hr) SELECT key2, value, cast(hr as int) FROM (SELECT if ((key % 3) < 2, 0, 1) as key2, value, (key % 2) as hr diff --git a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out index 8e50d23..bf010e7 100644 --- a/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out +++ b/ql/src/test/results/clientpositive/spark/infer_bucket_sort_reducers_power_two.q.out @@ -1,27 +1,17 @@ -PREHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata, it also verifies that the --- number of reducers chosen will be a power of two - -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +PREHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: 
default@test_table -POSTHOOK: query: -- This tests inferring how data is bucketed/sorted from the operators in the reducer --- and populating that information in partitions' metadata, it also verifies that the --- number of reducers chosen will be a power of two - -CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) +POSTHOOK: query: CREATE TABLE test_table (key STRING, value STRING) PARTITIONED BY (part STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table -PREHOOK: query: -- Test group by, should be bucketed and sorted by group by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by, should be bucketed and sorted by group by key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT key, count(*) FROM src GROUP BY key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -67,14 +57,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join, should be bucketed and sorted by join key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join, should be bucketed and sorted by join key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -120,14 +108,12 @@ 
Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join with two keys, should be bucketed and sorted by join keys -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, a.value FROM src a JOIN src b ON a.key = b.key AND a.value = b.value POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -173,14 +159,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join on three tables on same key, should be bucketed and sorted by join key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join on three tables on same key, should be bucketed and sorted by join key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.key = c.key) POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -226,14 +210,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') 
+PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test join on three tables on different keys, should be bucketed and sorted by latter key -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT a.key, c.value FROM src a JOIN src b ON (a.key = b.key) JOIN src c ON (b.value = c.value) POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -279,16 +261,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the --- key of the outer group by -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +PREHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_table@part=1 -POSTHOOK: query: -- Test group by in subquery with another group by outside, should be bucketed and sorted by the --- key of the outer group by -INSERT OVERWRITE TABLE test_table PARTITION (part = '1') +POSTHOOK: query: INSERT OVERWRITE TABLE test_table PARTITION (part = '1') SELECT count(1), value FROM (SELECT key, count(1) as value FROM src group by key) a group by value POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/spark/innerjoin.q.out b/ql/src/test/results/clientpositive/spark/innerjoin.q.out index 71e1529..2f49406 100644 --- a/ql/src/test/results/clientpositive/spark/innerjoin.q.out +++ b/ql/src/test/results/clientpositive/spark/innerjoin.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - 
-CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 @@ -1149,15 +1145,11 @@ POSTHOOK: Input: default@dest_j1 98 val_98 98 val_98 98 val_98 -PREHOOK: query: -- verify that INNER is a non-reserved word for backwards compatibility --- change from HIVE-6617, inner is a SQL2011 reserved keyword. -create table `inner`(i int) +PREHOOK: query: create table `inner`(i int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inner -POSTHOOK: query: -- verify that INNER is a non-reserved word for backwards compatibility --- change from HIVE-6617, inner is a SQL2011 reserved keyword. 
-create table `inner`(i int) +POSTHOOK: query: create table `inner`(i int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inner diff --git a/ql/src/test/results/clientpositive/spark/input12.q.out b/ql/src/test/results/clientpositive/spark/input12.q.out index 4317186..2efd81b 100644 --- a/ql/src/test/results/clientpositive/spark/input12.q.out +++ b/ql/src/test/results/clientpositive/spark/input12.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS( 0.20S) - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/input14.q.out b/ql/src/test/results/clientpositive/spark/input14.q.out index 3133cfe..36f162e 100644 --- a/ql/src/test/results/clientpositive/spark/input14.q.out +++ b/ql/src/test/results/clientpositive/spark/input14.q.out @@ -111,15 +111,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +PREHOOK: query: SELECT dest1.* FROM dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT 
dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1.* FROM dest1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/input16_cc.q.out b/ql/src/test/results/clientpositive/spark/input16_cc.q.out index 9ea3472..1b61e16 100644 --- a/ql/src/test/results/clientpositive/spark/input16_cc.q.out +++ b/ql/src/test/results/clientpositive/spark/input16_cc.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- TestSerDe is a user defined serde where the default delimiter is Ctrl-B --- the user is overwriting it with ctrlC - -DROP TABLE INPUT16_CC +PREHOOK: query: DROP TABLE INPUT16_CC PREHOOK: type: DROPTABLE -POSTHOOK: query: -- TestSerDe is a user defined serde where the default delimiter is Ctrl-B --- the user is overwriting it with ctrlC - -DROP TABLE INPUT16_CC +POSTHOOK: query: DROP TABLE INPUT16_CC POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE INPUT16_CC(KEY STRING, VALUE STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.TestSerDe' with serdeproperties ('testserde.default.serialization.format'='\003', 'dummy.prop.not.used'='dummyy.val') STORED AS TEXTFILE PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/spark/input17.q.out b/ql/src/test/results/clientpositive/spark/input17.q.out index 44822e0..d95dbcb 100644 --- a/ql/src/test/results/clientpositive/spark/input17.q.out +++ b/ql/src/test/results/clientpositive/spark/input17.q.out @@ -108,15 +108,11 @@ POSTHOOK: Input: default@src_thrift POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.key SCRIPT [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lint, type:array, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array>, comment:from deserializer), ] POSTHOOK: Lineage: dest1.value SCRIPT [(src_thrift)src_thrift.FieldSchema(name:aint, type:int, comment:from deserializer), 
(src_thrift)src_thrift.FieldSchema(name:lint, type:array, comment:from deserializer), (src_thrift)src_thrift.FieldSchema(name:lintstring, type:array>, comment:from deserializer), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +PREHOOK: query: SELECT dest1.* FROM dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1.* FROM dest1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/input18.q.out b/ql/src/test/results/clientpositive/spark/input18.q.out index af8707c..65850b2 100644 --- a/ql/src/test/results/clientpositive/spark/input18.q.out +++ b/ql/src/test/results/clientpositive/spark/input18.q.out @@ -111,15 +111,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@dest1 POSTHOOK: Lineage: dest1.key SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dest1.value SCRIPT [(src)src.FieldSchema(name:key, type:string, comment:default), (src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +PREHOOK: query: SELECT dest1.* FROM dest1 PREHOOK: type: QUERY PREHOOK: Input: default@dest1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT dest1.* FROM dest1 +POSTHOOK: query: SELECT dest1.* FROM dest1 POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/input1_limit.q.out b/ql/src/test/results/clientpositive/spark/input1_limit.q.out index c41093d..dd49287 100644 --- a/ql/src/test/results/clientpositive/spark/input1_limit.q.out +++ b/ql/src/test/results/clientpositive/spark/input1_limit.q.out @@ -1,12 +1,8 @@ -PREHOOK: 
query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/input_part2.q.out b/ql/src/test/results/clientpositive/spark/input_part2.q.out index 36bb40f..06ed93f 100644 --- a/ql/src/test/results/clientpositive/spark/input_part2.q.out +++ b/ql/src/test/results/clientpositive/spark/input_part2.q.out @@ -14,16 +14,12 @@ POSTHOOK: query: CREATE TABLE dest2(key INT, value STRING, hr STRING, ds STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM srcpart INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM srcpart INSERT OVERWRITE TABLE dest1 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-08' and srcpart.hr = '12' INSERT OVERWRITE TABLE dest2 SELECT srcpart.key, srcpart.value, srcpart.hr, srcpart.ds WHERE srcpart.key < 100 and srcpart.ds = '2008-04-09' and srcpart.hr = '12' diff --git a/ql/src/test/results/clientpositive/spark/insert_into1.q.out 
b/ql/src/test/results/clientpositive/spark/insert_into1.q.out index 764e29e..dff389d 100644 --- a/ql/src/test/results/clientpositive/spark/insert_into1.q.out +++ b/ql/src/test/results/clientpositive/spark/insert_into1.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE insert_into1 +PREHOOK: query: DROP TABLE insert_into1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE insert_into1 +POSTHOOK: query: DROP TABLE insert_into1 POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE insert_into1 (key int, value string) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/spark/join0.q.out b/ql/src/test/results/clientpositive/spark/join0.q.out index ab569f5..0db05a3 100644 --- a/ql/src/test/results/clientpositive/spark/join0.q.out +++ b/ql/src/test/results/clientpositive/spark/join0.q.out @@ -1,7 +1,5 @@ Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 @@ -9,9 +7,7 @@ SELECT src1.key as k1, src1.value as v1, (SELECT * FROM src WHERE src.key < 10) src2 SORT BY k1, v1, k2, v2 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 diff --git a/ql/src/test/results/clientpositive/spark/join1.q.out b/ql/src/test/results/clientpositive/spark/join1.q.out index 843a13f..a0ee4ea 100644 --- a/ql/src/test/results/clientpositive/spark/join1.q.out +++ b/ql/src/test/results/clientpositive/spark/join1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE 
PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/join10.q.out b/ql/src/test/results/clientpositive/spark/join10.q.out index 134436b..80fa3f2 100644 --- a/ql/src/test/results/clientpositive/spark/join10.q.out +++ b/ql/src/test/results/clientpositive/spark/join10.q.out @@ -1,15 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN FROM +PREHOOK: query: EXPLAIN FROM (SELECT src.* FROM src) x JOIN (SELECT src.* FROM src) Y ON (x.key = Y.key) SELECT Y.* PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN FROM +POSTHOOK: query: EXPLAIN FROM (SELECT src.* FROM src) x JOIN (SELECT src.* FROM src) Y diff --git a/ql/src/test/results/clientpositive/spark/join11.q.out b/ql/src/test/results/clientpositive/spark/join11.q.out index 193f3c9..8e245e9 100644 --- a/ql/src/test/results/clientpositive/spark/join11.q.out +++ b/ql/src/test/results/clientpositive/spark/join11.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 @@ -8,9 +6,7 @@ JOIN (SELECT src.key as c3, src.value as c4 from src) src2 ON src1.c1 = src2.c3 AND src1.c1 < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 diff --git a/ql/src/test/results/clientpositive/spark/join12.q.out b/ql/src/test/results/clientpositive/spark/join12.q.out index 37f1c36..88bacf5 100644 --- a/ql/src/test/results/clientpositive/spark/join12.q.out +++ 
b/ql/src/test/results/clientpositive/spark/join12.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 @@ -11,9 +9,7 @@ JOIN (SELECT src.key as c5, src.value as c6 from src) src3 ON src1.c1 = src3.c5 AND src3.c5 < 80 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 diff --git a/ql/src/test/results/clientpositive/spark/join13.q.out b/ql/src/test/results/clientpositive/spark/join13.q.out index 8b9dd6a..69d64e2 100644 --- a/ql/src/test/results/clientpositive/spark/join13.q.out +++ b/ql/src/test/results/clientpositive/spark/join13.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 @@ -11,9 +9,7 @@ JOIN (SELECT src.key as c5, src.value as c6 from src) src3 ON src1.c1 + src2.c3 = src3.c5 AND src3.c5 < 200 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src) src1 diff --git a/ql/src/test/results/clientpositive/spark/join14.q.out b/ql/src/test/results/clientpositive/spark/join14.q.out index a511440..e804a1d 100644 --- a/ql/src/test/results/clientpositive/spark/join14.q.out +++ b/ql/src/test/results/clientpositive/spark/join14.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) - -CREATE TABLE dest1(c1 INT, c2 STRING) 
STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/join15.q.out b/ql/src/test/results/clientpositive/spark/join15.q.out index 453dd1c..8623359 100644 --- a/ql/src/test/results/clientpositive/spark/join15.q.out +++ b/ql/src/test/results/clientpositive/spark/join15.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key) SORT BY src1.key, src1.value, src2.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key) SORT BY src1.key, src1.value, src2.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/join17.q.out b/ql/src/test/results/clientpositive/spark/join17.q.out index 66a8829..83e7281 100644 --- a/ql/src/test/results/clientpositive/spark/join17.q.out +++ b/ql/src/test/results/clientpositive/spark/join17.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key1 INT, value1 STRING, key2 INT, value2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/join18.q.out b/ql/src/test/results/clientpositive/spark/join18.q.out index 
8e97853..1a949b2 100644 --- a/ql/src/test/results/clientpositive/spark/join18.q.out +++ b/ql/src/test/results/clientpositive/spark/join18.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.key, a.value, b.key, b.value FROM ( @@ -13,9 +11,7 @@ EXPLAIN ) b ON (a.key = b.key) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.key, a.value, b.key, b.value FROM ( diff --git a/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out b/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out index e7758b1..d0ae0ba 100644 --- a/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/join18_multi_distinct.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.key, a.value, b.key, b.value1, b.value2 FROM ( @@ -14,9 +12,7 @@ EXPLAIN ) b ON (a.key = b.key) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.key, a.value, b.key, b.value1, b.value2 FROM ( diff --git a/ql/src/test/results/clientpositive/spark/join19.q.out b/ql/src/test/results/clientpositive/spark/join19.q.out index 7c31fd3..35d9942 100644 --- a/ql/src/test/results/clientpositive/spark/join19.q.out +++ b/ql/src/test/results/clientpositive/spark/join19.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE triples (foo string, subject string, predicate string, object string, foo2 string) +PREHOOK: query: CREATE TABLE triples (foo string, subject string, predicate string, object string, foo2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@triples -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE triples (foo string, subject string, predicate string, object string, foo2 string) +POSTHOOK: query: CREATE TABLE triples (foo string, subject string, 
predicate string, object string, foo2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@triples diff --git a/ql/src/test/results/clientpositive/spark/join2.q.out b/ql/src/test/results/clientpositive/spark/join2.q.out index 6f5d8ca..f684beb 100644 --- a/ql/src/test/results/clientpositive/spark/join2.q.out +++ b/ql/src/test/results/clientpositive/spark/join2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j2(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j2 diff --git a/ql/src/test/results/clientpositive/spark/join20.q.out b/ql/src/test/results/clientpositive/spark/join20.q.out index 8d662d6..7b214f9 100644 --- a/ql/src/test/results/clientpositive/spark/join20.q.out +++ b/ql/src/test/results/clientpositive/spark/join20.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 ON (src1.key = src2.key AND src1.key < 10) RIGHT OUTER JOIN src src3 ON (src1.key = src3.key AND src3.key < 20) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/join21.q.out 
b/ql/src/test/results/clientpositive/spark/join21.q.out index 5fe3255..f8cf23e 100644 --- a/ql/src/test/results/clientpositive/spark/join21.q.out +++ b/ql/src/test/results/clientpositive/spark/join21.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src src1 LEFT OUTER JOIN src src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) RIGHT OUTER JOIN src src3 ON (src2.key = src3.key AND src3.key < 10) SORT BY src1.key, src1.value, src2.key, src2.value, src3.key, src3.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/join23.q.out b/ql/src/test/results/clientpositive/spark/join23.q.out index 49697da..40039db 100644 --- a/ql/src/test/results/clientpositive/spark/join23.q.out +++ b/ql/src/test/results/clientpositive/spark/join23.q.out @@ -1,12 +1,8 @@ Warning: Shuffle Join JOIN[4][tables = [src1, src2]] in Work 'Reducer 2' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM src src1 JOIN src src2 WHERE src1.key < 10 and src2.key < 10 SORT BY src1.key, src1.value, src2.key, src2.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/join25.q.out b/ql/src/test/results/clientpositive/spark/join25.q.out index 1bbc0e4..bd44295 100644 --- 
a/ql/src/test/results/clientpositive/spark/join25.q.out +++ b/ql/src/test/results/clientpositive/spark/join25.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/join26.q.out b/ql/src/test/results/clientpositive/spark/join26.q.out index cacfe42..eb8cd78 100644 --- a/ql/src/test/results/clientpositive/spark/join26.q.out +++ b/ql/src/test/results/clientpositive/spark/join26.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/join27.q.out b/ql/src/test/results/clientpositive/spark/join27.q.out index eb26627..0dfad72 100644 --- a/ql/src/test/results/clientpositive/spark/join27.q.out +++ b/ql/src/test/results/clientpositive/spark/join27.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - 
-CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/join28.q.out b/ql/src/test/results/clientpositive/spark/join28.q.out index 548a806..8d4d870 100644 --- a/ql/src/test/results/clientpositive/spark/join28.q.out +++ b/ql/src/test/results/clientpositive/spark/join28.q.out @@ -1,18 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq.key1, z.value FROM @@ -20,9 +14,7 @@ FROM FROM src1 x JOIN src y ON (x.key = y.key)) subq JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq.key1, z.value FROM diff 
--git a/ql/src/test/results/clientpositive/spark/join29.q.out b/ql/src/test/results/clientpositive/spark/join29.q.out index 1549b02..573628f 100644 --- a/ql/src/test/results/clientpositive/spark/join29.q.out +++ b/ql/src/test/results/clientpositive/spark/join29.q.out @@ -1,26 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) +PREHOOK: query: CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, cnt1 INT, cnt2 INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq1.key, subq1.cnt, subq2.cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN diff --git a/ql/src/test/results/clientpositive/spark/join3.q.out b/ql/src/test/results/clientpositive/spark/join3.q.out index b3f3340..e50f091 100644 --- a/ql/src/test/results/clientpositive/spark/join3.q.out +++ b/ql/src/test/results/clientpositive/spark/join3.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/join30.q.out b/ql/src/test/results/clientpositive/spark/join30.q.out index bd501e6..8360400 100644 --- a/ql/src/test/results/clientpositive/spark/join30.q.out +++ b/ql/src/test/results/clientpositive/spark/join30.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, cnt INT) +PREHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, cnt INT) +POSTHOOK: query: CREATE TABLE dest_j1(key INT, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/join31.q.out b/ql/src/test/results/clientpositive/spark/join31.q.out index 25c3334..3fee7b8 100644 --- a/ql/src/test/results/clientpositive/spark/join31.q.out +++ b/ql/src/test/results/clientpositive/spark/join31.q.out @@ -1,27 +1,19 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, cnt INT) +PREHOOK: query: CREATE TABLE dest_j1(key STRING, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, cnt INT) +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN INSERT 
OVERWRITE TABLE dest_j1 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN (select y.key, count(1) as cnt from src y group by y.key) subq2 ON (subq1.key = subq2.key) group by subq1.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE dest_j1 SELECT subq1.key, count(1) as cnt FROM (select x.key, count(1) as cnt from src1 x group by x.key) subq1 JOIN diff --git a/ql/src/test/results/clientpositive/spark/join32.q.out b/ql/src/test/results/clientpositive/spark/join32.q.out index 7cecbc6..5c0eaba 100644 --- a/ql/src/test/results/clientpositive/spark/join32.q.out +++ b/ql/src/test/results/clientpositive/spark/join32.q.out @@ -1,26 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, 
z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) diff --git a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out index 091c10b..f3df5c4 100644 --- a/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out +++ b/ql/src/test/results/clientpositive/spark/join32_lessSize.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 @@ -18,17 +14,13 @@ POSTHOOK: query: CREATE TABLE dest_j2(key STRING, value STRING, val2 STRING) STO POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j2 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) diff --git a/ql/src/test/results/clientpositive/spark/join33.q.out b/ql/src/test/results/clientpositive/spark/join33.q.out index 7cecbc6..5c0eaba 100644 --- 
a/ql/src/test/results/clientpositive/spark/join33.q.out +++ b/ql/src/test/results/clientpositive/spark/join33.q.out @@ -1,26 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) JOIN srcpart z ON (x.value = z.value and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, z.value, y.value FROM src1 x JOIN src y ON (x.key = y.key) diff --git a/ql/src/test/results/clientpositive/spark/join34.q.out b/ql/src/test/results/clientpositive/spark/join34.q.out index 2d97046..b299885 100644 --- a/ql/src/test/results/clientpositive/spark/join34.q.out +++ b/ql/src/test/results/clientpositive/spark/join34.q.out @@ -1,18 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, x.value, subq1.value FROM @@ -22,9 +16,7 @@ FROM ) subq1 JOIN src1 x ON (x.key = subq1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, x.value, subq1.value FROM diff --git a/ql/src/test/results/clientpositive/spark/join35.q.out b/ql/src/test/results/clientpositive/spark/join35.q.out index 80c9998..e8e5688 100644 --- a/ql/src/test/results/clientpositive/spark/join35.q.out +++ b/ql/src/test/results/clientpositive/spark/join35.q.out @@ -1,18 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 INT) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, x.value, subq1.cnt FROM @@ -22,9 +16,7 @@ FROM ) subq1 JOIN src1 x ON 
(x.key = subq1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest_j1 SELECT x.key, x.value, subq1.cnt FROM diff --git a/ql/src/test/results/clientpositive/spark/join36.q.out b/ql/src/test/results/clientpositive/spark/join36.q.out index f8e0be2..49167f3 100644 --- a/ql/src/test/results/clientpositive/spark/join36.q.out +++ b/ql/src/test/results/clientpositive/spark/join36.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tmp1(key INT, cnt INT) +PREHOOK: query: CREATE TABLE tmp1(key INT, cnt INT) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmp1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE tmp1(key INT, cnt INT) +POSTHOOK: query: CREATE TABLE tmp1(key INT, cnt INT) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmp1 diff --git a/ql/src/test/results/clientpositive/spark/join37.q.out b/ql/src/test/results/clientpositive/spark/join37.q.out index 71aefa1..231ca22 100644 --- a/ql/src/test/results/clientpositive/spark/join37.q.out +++ b/ql/src/test/results/clientpositive/spark/join37.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/join39.q.out 
b/ql/src/test/results/clientpositive/spark/join39.q.out index d72e681..5a5aeb4 100644 --- a/ql/src/test/results/clientpositive/spark/join39.q.out +++ b/ql/src/test/results/clientpositive/spark/join39.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, key1 string, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/join4.q.out b/ql/src/test/results/clientpositive/spark/join4.q.out index e80df52..08b1f58 100644 --- a/ql/src/test/results/clientpositive/spark/join4.q.out +++ b/ql/src/test/results/clientpositive/spark/join4.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/join41.q.out b/ql/src/test/results/clientpositive/spark/join41.q.out index f9b4839..9290f66 100644 --- 
a/ql/src/test/results/clientpositive/spark/join41.q.out +++ b/ql/src/test/results/clientpositive/spark/join41.q.out @@ -95,12 +95,10 @@ POSTHOOK: Input: default@s1 0 val_0 NULL NULL 0 val_0 NULL NULL 0 val_0 NULL NULL -PREHOOK: query: -- Make sure the big table is chosen correctly as part of HIVE-4146 -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM s1 src1 LEFT OUTER JOIN s1 src2 ON (src1.key = src2.key AND src2.key > 10) PREHOOK: type: QUERY -POSTHOOK: query: -- Make sure the big table is chosen correctly as part of HIVE-4146 -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM s1 src1 LEFT OUTER JOIN s1 src2 ON (src1.key = src2.key AND src2.key > 10) POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/join5.q.out b/ql/src/test/results/clientpositive/spark/join5.q.out index 464bd51..6d7723d 100644 --- a/ql/src/test/results/clientpositive/spark/join5.q.out +++ b/ql/src/test/results/clientpositive/spark/join5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/join6.q.out b/ql/src/test/results/clientpositive/spark/join6.q.out index 8d759e4..8ae5e3a 100644 --- a/ql/src/test/results/clientpositive/spark/join6.q.out +++ b/ql/src/test/results/clientpositive/spark/join6.q.out @@ -6,9 +6,7 @@ POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED POSTHOOK: type: CREATETABLE POSTHOOK: 
Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM ( @@ -23,9 +21,7 @@ FROM ( ) c INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM ( diff --git a/ql/src/test/results/clientpositive/spark/join7.q.out b/ql/src/test/results/clientpositive/spark/join7.q.out index b417397..d01817c 100644 --- a/ql/src/test/results/clientpositive/spark/join7.q.out +++ b/ql/src/test/results/clientpositive/spark/join7.q.out @@ -6,9 +6,7 @@ POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING, c5 INT POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM ( @@ -28,9 +26,7 @@ FROM ( ) c INSERT OVERWRITE TABLE dest1 SELECT c.c1, c.c2, c.c3, c.c4, c.c5, c.c6 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM ( diff --git a/ql/src/test/results/clientpositive/spark/join8.q.out b/ql/src/test/results/clientpositive/spark/join8.q.out index 4903c90..1819808 100644 --- a/ql/src/test/results/clientpositive/spark/join8.q.out +++ b/ql/src/test/results/clientpositive/spark/join8.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(c1 INT, c2 STRING, c3 INT, c4 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/join9.q.out b/ql/src/test/results/clientpositive/spark/join9.q.out index 05aa50b..8f8fc88 100644 --- a/ql/src/test/results/clientpositive/spark/join9.q.out +++ b/ql/src/test/results/clientpositive/spark/join9.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/join_1to1.q.out b/ql/src/test/results/clientpositive/spark/join_1to1.q.out index b1fc8f0..1ccb613 100644 --- a/ql/src/test/results/clientpositive/spark/join_1to1.q.out +++ b/ql/src/test/results/clientpositive/spark/join_1to1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE join_1to1_1(key1 int, key2 int, value int) +PREHOOK: query: CREATE TABLE join_1to1_1(key1 int, key2 int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@join_1to1_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE join_1to1_1(key1 int, key2 int, value int) +POSTHOOK: query: CREATE TABLE join_1to1_1(key1 int, key2 int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@join_1to1_1 diff --git a/ql/src/test/results/clientpositive/spark/join_casesensitive.q.out b/ql/src/test/results/clientpositive/spark/join_casesensitive.q.out index 4978b88..f9bc3c0 100644 --- a/ql/src/test/results/clientpositive/spark/join_casesensitive.q.out +++ 
b/ql/src/test/results/clientpositive/spark/join_casesensitive.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE joinone(key1 int, key2 int, value int) +PREHOOK: query: CREATE TABLE joinone(key1 int, key2 int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@joinone -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE joinone(key1 int, key2 int, value int) +POSTHOOK: query: CREATE TABLE joinone(key1 int, key2 int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@joinone diff --git a/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out index 351bfd6..777fbdc 100644 --- a/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out +++ b/ql/src/test/results/clientpositive/spark/join_filters_overlap.q.out @@ -1,26 +1,18 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- HIVE-3411 Filter predicates on outer join overlapped on single alias is not handled properly - -create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 +PREHOOK: query: create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@a -POSTHOOK: query: -- SORT_QUERY_RESULTS --- HIVE-3411 Filter predicates on outer join overlapped on single alias is not handled properly - -create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 +POSTHOOK: query: create table a as SELECT 100 as key, a.value as value FROM src LATERAL VIEW explode(array(40, 50, 60)) a as value limit 3 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: 
default@a POSTHOOK: Lineage: a.key SIMPLE [] POSTHOOK: Lineage: a.value SCRIPT [] -PREHOOK: query: -- overlap on a -explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +PREHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: -- overlap on a -explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) +POSTHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -315,11 +307,9 @@ POSTHOOK: Input: default@a 100 40 NULL NULL NULL NULL 100 50 100 50 NULL NULL 100 60 NULL NULL 100 60 -PREHOOK: query: -- overlap on b -explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +PREHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: -- overlap on b -explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) +POSTHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -614,11 +604,9 @@ POSTHOOK: Input: default@a 100 50 100 50 NULL NULL 
NULL NULL 100 40 NULL NULL NULL NULL 100 60 100 60 -PREHOOK: query: -- overlap on b with two filters for each -explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +PREHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) PREHOOK: type: QUERY -POSTHOOK: query: -- overlap on b with two filters for each -explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) +POSTHOOK: query: explain extended select * from a right outer join a b on (a.key=b.key AND a.value=50 AND b.value=50 AND b.value>10) left outer join a c on (b.key=c.key AND b.value=60 AND b.value>20 AND c.value=60) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -913,11 +901,9 @@ POSTHOOK: Input: default@a 100 50 100 50 NULL NULL NULL NULL 100 40 NULL NULL NULL NULL 100 60 100 60 -PREHOOK: query: -- overlap on a, b -explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) PREHOOK: type: QUERY -POSTHOOK: query: -- overlap on a, b -explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND 
a.value=40 AND d.value=40) +POSTHOOK: query: explain extended select * from a full outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (b.key=c.key AND b.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -1275,11 +1261,9 @@ POSTHOOK: Input: default@a 100 60 NULL NULL NULL NULL NULL NULL NULL NULL 100 40 NULL NULL NULL NULL NULL NULL 100 60 100 60 NULL NULL -PREHOOK: query: -- triple overlap on a -explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +PREHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) PREHOOK: type: QUERY -POSTHOOK: query: -- triple overlap on a -explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) +POSTHOOK: query: explain extended select * from a left outer join a b on (a.key=b.key AND a.value=50 AND b.value=50) left outer join a c on (a.key=c.key AND a.value=60 AND c.value=60) left outer join a d on (a.key=d.key AND a.value=40 AND d.value=40) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/join_literals.q.out b/ql/src/test/results/clientpositive/spark/join_literals.q.out index eab2085..e2655d1 100644 --- a/ql/src/test/results/clientpositive/spark/join_literals.q.out +++ b/ql/src/test/results/clientpositive/spark/join_literals.q.out @@ -1,13 +1,9 @@ WARNING: Comparing a bigint and a 
string may result in a loss of precision. -PREHOOK: query: -- Test Joins with a variety of literals in the on clause - -SELECT COUNT(*) FROM src a JOIN src b ON a.key = b.key AND a.key = 0L +PREHOOK: query: SELECT COUNT(*) FROM src a JOIN src b ON a.key = b.key AND a.key = 0L PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test Joins with a variety of literals in the on clause - -SELECT COUNT(*) FROM src a JOIN src b ON a.key = b.key AND a.key = 0L +POSTHOOK: query: SELECT COUNT(*) FROM src a JOIN src b ON a.key = b.key AND a.key = 0L POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out b/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out index 5d7cecf..bcb99b4 100644 --- a/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/join_map_ppr.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key STRING, value STRING, val2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out b/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out index 62ab781..4f1148b 100644 --- a/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out +++ b/ql/src/test/results/clientpositive/spark/join_merge_multi_expressions.q.out @@ -1,9 
+1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -explain +PREHOOK: query: explain select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS -explain +POSTHOOK: query: explain select count(*) from srcpart a join srcpart b on a.key = b.key and a.hr = b.hr join srcpart c on a.hr = c.hr and a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out b/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out index 99a7aa2..b8c4366 100644 --- a/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out +++ b/ql/src/test/results/clientpositive/spark/join_nullsafe.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE myinput1(key int, value int) +PREHOOK: query: CREATE TABLE myinput1(key int, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@myinput1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE myinput1(key int, value int) +POSTHOOK: query: CREATE TABLE myinput1(key int, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@myinput1 @@ -18,11 +14,9 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in8.txt' INTO TABLE my POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@myinput1 -PREHOOK: query: -- merging -explain select * from myinput1 a join myinput1 b on a.key<=>b.value +PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value PREHOOK: type: QUERY -POSTHOOK: query: -- merging -explain select * from myinput1 a join myinput1 b on a.key<=>b.value +POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -86,13 +80,11 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- 
SORT_QUERY_RESULTS -select * from myinput1 a join myinput1 b on a.key<=>b.value +PREHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value PREHOOK: type: QUERY PREHOOK: Input: default@myinput1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS -select * from myinput1 a join myinput1 b on a.key<=>b.value +POSTHOOK: query: select * from myinput1 a join myinput1 b on a.key<=>b.value POSTHOOK: type: QUERY POSTHOOK: Input: default@myinput1 #### A masked pattern was here #### @@ -503,13 +495,11 @@ POSTHOOK: Input: default@myinput1 100 100 100 100 100 100 NULL 10 10 NULL NULL 10 NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- outer joins -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value +PREHOOK: query: SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value PREHOOK: type: QUERY PREHOOK: Input: default@myinput1 #### A masked pattern was here #### -POSTHOOK: query: -- outer joins -SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value +POSTHOOK: query: SELECT * FROM myinput1 a LEFT OUTER JOIN myinput1 b ON a.key<=>b.value POSTHOOK: type: QUERY POSTHOOK: Input: default@myinput1 #### A masked pattern was here #### @@ -566,13 +556,11 @@ NULL NULL 10 NULL NULL NULL 48 NULL NULL NULL NULL 35 NULL NULL NULL NULL -PREHOOK: query: -- map joins -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value PREHOOK: type: QUERY PREHOOK: Input: default@myinput1 #### A masked pattern was here #### -POSTHOOK: query: -- map joins -SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value +POSTHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM myinput1 a JOIN myinput1 b ON a.key<=>b.value POSTHOOK: type: QUERY POSTHOOK: Input: default@myinput1 #### A masked pattern was here #### @@ -630,13 +618,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/in5.txt' 
into table sm POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_input -PREHOOK: query: -- smbs -CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@smb_input1 -POSTHOOK: query: -- smbs -CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE smb_input1(key int, value int) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@smb_input1 @@ -1552,11 +1538,9 @@ NULL 10050 NULL 10050 NULL 35 NULL 35 NULL NULL 12 NULL NULL NULL NULL NULL -PREHOOK: query: --HIVE-3315 join predicate transitive -explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL +PREHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL PREHOOK: type: QUERY -POSTHOOK: query: --HIVE-3315 join predicate transitive -explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL +POSTHOOK: query: explain select * from myinput1 a join myinput1 b on a.key<=>b.value AND a.key is NULL POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/join_rc.q.out b/ql/src/test/results/clientpositive/spark/join_rc.q.out index ffe37c6..fbe3aa9 100644 --- a/ql/src/test/results/clientpositive/spark/join_rc.q.out +++ b/ql/src/test/results/clientpositive/spark/join_rc.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table join_rc1(key string, value string) stored as RCFile +PREHOOK: query: create table join_rc1(key string, value string) stored as RCFile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: 
Output: default@join_rc1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table join_rc1(key string, value string) stored as RCFile +POSTHOOK: query: create table join_rc1(key string, value string) stored as RCFile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@join_rc1 diff --git a/ql/src/test/results/clientpositive/spark/join_reorder.q.out b/ql/src/test/results/clientpositive/spark/join_reorder.q.out index cc9f9a5..eae5426 100644 --- a/ql/src/test/results/clientpositive/spark/join_reorder.q.out +++ b/ql/src/test/results/clientpositive/spark/join_reorder.q.out @@ -46,14 +46,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key +PREHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key SELECT a.key, a.val, c.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key +POSTHOOK: query: EXPLAIN FROM T1 a JOIN src c ON c.key+1=a.key SELECT a.key, a.val, c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/join_thrift.q.out b/ql/src/test/results/clientpositive/spark/join_thrift.q.out index 72679ca..2187fc9 100644 --- a/ql/src/test/results/clientpositive/spark/join_thrift.q.out +++ b/ql/src/test/results/clientpositive/spark/join_thrift.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DESCRIBE src_thrift +PREHOOK: query: DESCRIBE src_thrift PREHOOK: type: DESCTABLE PREHOOK: Input: default@src_thrift -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DESCRIBE src_thrift +POSTHOOK: query: DESCRIBE src_thrift POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@src_thrift aint int from deserializer diff --git a/ql/src/test/results/clientpositive/spark/join_vc.q.out 
b/ql/src/test/results/clientpositive/spark/join_vc.q.out index 54c9e02..50762bd 100644 --- a/ql/src/test/results/clientpositive/spark/join_vc.q.out +++ b/ql/src/test/results/clientpositive/spark/join_vc.q.out @@ -1,14 +1,6 @@ -PREHOOK: query: -- see HIVE-4033 earlier a flag named hasVC was not initialized correctly in MapOperator.java, resulting in NPE for following query. order by and limit in the query is not relevant, problem would be evident even without those. They are there to keep .q.out file small and sorted. - --- SORT_QUERY_RESULTS - -explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3 +PREHOOK: query: explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3 PREHOOK: type: QUERY -POSTHOOK: query: -- see HIVE-4033 earlier a flag named hasVC was not initialized correctly in MapOperator.java, resulting in NPE for following query. order by and limit in the query is not relevant, problem would be evident even without those. They are there to keep .q.out file small and sorted. 
- --- SORT_QUERY_RESULTS - -explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3 +POSTHOOK: query: explain select t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value from src t1 join src t2 on t1.key = t2.key join src t3 on t2.value = t3.value order by t3.BLOCK__OFFSET__INSIDE__FILE,t3.key,t3.value limit 3 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/join_view.q.out b/ql/src/test/results/clientpositive/spark/join_view.q.out index fc78988..6800218 100644 --- a/ql/src/test/results/clientpositive/spark/join_view.q.out +++ b/ql/src/test/results/clientpositive/spark/join_view.q.out @@ -22,17 +22,13 @@ POSTHOOK: query: create table invites2 (foo int, bar string) partitioned by (ds POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@invites2 -PREHOOK: query: -- test join views: see HIVE-1989 - -create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds +PREHOOK: query: create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds PREHOOK: type: CREATEVIEW PREHOOK: Input: default@invites PREHOOK: Input: default@invites2 PREHOOK: Output: database:default PREHOOK: Output: default@v -POSTHOOK: query: -- test join views: see HIVE-1989 - -create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds +POSTHOOK: query: create view v as select invites.bar, invites2.foo, invites2.ds from invites join invites2 on invites.ds=invites2.ds POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@invites POSTHOOK: Input: default@invites2 diff --git a/ql/src/test/results/clientpositive/spark/leftsemijoin.q.out b/ql/src/test/results/clientpositive/spark/leftsemijoin.q.out index 
11f0bb0..a11bbc4 100644 --- a/ql/src/test/results/clientpositive/spark/leftsemijoin.q.out +++ b/ql/src/test/results/clientpositive/spark/leftsemijoin.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table sales +PREHOOK: query: drop table sales PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table sales +POSTHOOK: query: drop table sales POSTHOOK: type: DROPTABLE PREHOOK: query: drop table things PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/spark/leftsemijoin_mr.q.out b/ql/src/test/results/clientpositive/spark/leftsemijoin_mr.q.out index fe63057..ce0326c 100644 --- a/ql/src/test/results/clientpositive/spark/leftsemijoin_mr.q.out +++ b/ql/src/test/results/clientpositive/spark/leftsemijoin_mr.q.out @@ -30,15 +30,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/leftsemijoin_mr_t2.txt POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- Run this query using TestMinimrCliDriver - -SELECT * FROM T1 +PREHOOK: query: SELECT * FROM T1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### -POSTHOOK: query: -- Run this query using TestMinimrCliDriver - -SELECT * FROM T1 +POSTHOOK: query: SELECT * FROM T1 POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out b/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out index c5bc568..005026d 100644 --- a/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out +++ b/ql/src/test/results/clientpositive/spark/limit_partition_metadataonly.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain select ds from srcpart where hr=11 and ds='2008-04-08' +PREHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - 
-explain select ds from srcpart where hr=11 and ds='2008-04-08' +POSTHOOK: query: explain select ds from srcpart where hr=11 and ds='2008-04-08' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out index 67c6e70..c81240d 100644 --- a/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out +++ b/ql/src/test/results/clientpositive/spark/limit_pushdown.q.out @@ -1,15 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- HIVE-3562 Some limit can be pushed down to map stage - -explain +PREHOOK: query: explain select key,value from src order by key limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- HIVE-3562 Some limit can be pushed down to map stage - -explain +POSTHOOK: query: explain select key,value from src order by key limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -264,12 +256,10 @@ val_126 127.0 val_128 387.0 val_129 260.0 val_131 132.0 -PREHOOK: query: -- deduped RS -explain +PREHOOK: query: explain select value,avg(key + 1) from src group by value order by value limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- deduped RS -explain +POSTHOOK: query: explain select value,avg(key + 1) from src group by value order by value limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -358,12 +348,10 @@ val_126 127.0 val_128 129.0 val_129 130.0 val_131 132.0 -PREHOOK: query: -- distincts -explain +PREHOOK: query: explain select distinct(cdouble) as dis from alltypesorc order by dis limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- distincts -explain +POSTHOOK: query: explain select distinct(cdouble) as dis from alltypesorc order by dis limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -637,12 +625,10 @@ POSTHOOK: Input: default@alltypesorc -63 19 -64 24 NULL 2932 -PREHOOK: query: -- multi distinct -explain +PREHOOK: query: explain select ctinyint, count(distinct(cstring1)), 
count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- multi distinct -explain +POSTHOOK: query: explain select ctinyint, count(distinct(cstring1)), count(distinct(cstring2)) from alltypesorc group by ctinyint order by ctinyint limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -730,12 +716,10 @@ POSTHOOK: Input: default@alltypesorc -63 3 16 -64 3 13 NULL 3065 3 -PREHOOK: query: -- limit zero -explain +PREHOOK: query: explain select key,value from src order by key limit 0 PREHOOK: type: QUERY -POSTHOOK: query: -- limit zero -explain +POSTHOOK: query: explain select key,value from src order by key limit 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -756,12 +740,10 @@ POSTHOOK: query: select key,value from src order by key limit 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -PREHOOK: query: -- 2MR (applied to last RS) -explain +PREHOOK: query: explain select value, sum(key) as sum from src group by value order by sum limit 20 PREHOOK: type: QUERY -POSTHOOK: query: -- 2MR (applied to last RS) -explain +POSTHOOK: query: explain select value, sum(key) as sum from src group by value order by sum limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -862,16 +844,14 @@ val_41 41.0 val_5 15.0 val_8 8.0 val_9 9.0 -PREHOOK: query: -- subqueries -explain +PREHOOK: query: explain select * from (select key, count(1) from src group by key order by key limit 2) subq join (select key, count(1) from src group by key limit 3) subq2 on subq.key=subq2.key limit 4 PREHOOK: type: QUERY -POSTHOOK: query: -- subqueries -explain +POSTHOOK: query: explain select * from (select key, count(1) from src group by key order by key limit 2) subq join @@ -1017,12 +997,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- map aggregation disabled -explain +PREHOOK: query: explain select value, sum(key) as sum from src group by value order by value limit 20 PREHOOK: type: 
QUERY -POSTHOOK: query: -- map aggregation disabled -explain +POSTHOOK: query: explain select value, sum(key) as sum from src group by value order by value limit 20 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1105,12 +1083,10 @@ val_126 126.0 val_128 384.0 val_129 258.0 val_131 131.0 -PREHOOK: query: -- flush for order-by -explain +PREHOOK: query: explain select key,value,value,value,value,value,value,value,value from src order by key limit 100 PREHOOK: type: QUERY -POSTHOOK: query: -- flush for order-by -explain +POSTHOOK: query: explain select key,value,value,value,value,value,value,value,value from src order by key limit 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1270,12 +1246,10 @@ POSTHOOK: Input: default@src 187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 val_187 -PREHOOK: query: -- flush for group-by -explain +PREHOOK: query: explain select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100 PREHOOK: type: QUERY -POSTHOOK: query: -- flush for group-by -explain +POSTHOOK: query: explain select sum(key) as sum from src group by concat(key,value,value,value,value,value,value,value,value,value) order by sum limit 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out index 2e5c8b4..1ab08fc 100644 --- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out +++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_10.q.out @@ -1,18 +1,4 @@ -PREHOOK: query: -- run this test case in minimr to ensure it works in cluster - --- list bucketing DML: static partition. multiple skewed columns. 
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103/value=val_103: --- 99 000000_0 --- 99 000001_0 --- ds=2008-04-08/hr=11/key=484/value=val_484: --- 87 000000_0 --- 87 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +PREHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ('484','51','103') stored as DIRECTORIES @@ -20,21 +6,7 @@ create table list_bucketing_static_part (key String, value String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- run this test case in minimr to ensure it works in cluster - --- list bucketing DML: static partition. multiple skewed columns. --- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103/value=val_103: --- 99 000000_0 --- 99 000001_0 --- ds=2008-04-08/hr=11/key=484/value=val_484: --- 87 000000_0 --- 87 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +POSTHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key) on ('484','51','103') stored as DIRECTORIES @@ -42,13 +14,11 @@ create table list_bucketing_static_part (key String, value String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_static_part -PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from src PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. 
use bucketize to generate a few small files. -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from src POSTHOOK: type: QUERY @@ -194,12 +164,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_static_part +PREHOOK: query: show partitions list_bucketing_static_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_static_part +POSTHOOK: query: show partitions list_bucketing_static_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_static_part ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out index dabe1ca..fea5b2f 100644 --- a/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out +++ b/ql/src/test/results/clientpositive/spark/list_bucket_dml_2.q.out @@ -1,19 +1,4 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML: static partition. multiple skewed columns. 
--- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103/value=val_103: --- 99 000000_0 --- 99 000001_0 --- ds=2008-04-08/hr=11/key=484/value=val_484: --- 87 000000_0 --- 87 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +PREHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -21,22 +6,7 @@ create table list_bucketing_static_part (key String, value String) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@list_bucketing_static_part -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- SORT_QUERY_RESULTS - --- list bucketing DML: static partition. multiple skewed columns. --- ds=2008-04-08/hr=11/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME: --- 5263 000000_0 --- 5263 000001_0 --- ds=2008-04-08/hr=11/key=103/value=val_103: --- 99 000000_0 --- 99 000001_0 --- ds=2008-04-08/hr=11/key=484/value=val_484: --- 87 000000_0 --- 87 000001_0 - --- create a skewed table -create table list_bucketing_static_part (key String, value String) +POSTHOOK: query: create table list_bucketing_static_part (key String, value String) partitioned by (ds String, hr String) skewed by (key, value) on (('484','val_484'),('51','val_14'),('103','val_103')) stored as DIRECTORIES @@ -44,13 +14,11 @@ create table list_bucketing_static_part (key String, value String) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@list_bucketing_static_part -PREHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. 
-explain extended +PREHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- list bucketing DML without merge. use bucketize to generate a few small files. -explain extended +POSTHOOK: query: explain extended insert overwrite table list_bucketing_static_part partition (ds = '2008-04-08', hr = '11') select key, value from srcpart where ds = '2008-04-08' POSTHOOK: type: QUERY @@ -249,12 +217,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@list_bucketing_static_part@ds=2008-04-08/hr=11 POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).key SIMPLE [(srcpart)srcpart.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: list_bucketing_static_part PARTITION(ds=2008-04-08,hr=11).value SIMPLE [(srcpart)srcpart.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- check DML result -show partitions list_bucketing_static_part +PREHOOK: query: show partitions list_bucketing_static_part PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@list_bucketing_static_part -POSTHOOK: query: -- check DML result -show partitions list_bucketing_static_part +POSTHOOK: query: show partitions list_bucketing_static_part POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@list_bucketing_static_part ds=2008-04-08/hr=11 @@ -417,17 +383,13 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### 484 val_484 2008-04-08 11 484 val_484 2008-04-08 12 -PREHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none --- but query should succeed for 51 or 51 and val_14 -select * from srcpart where ds = '2008-04-08' and key = '51' +PREHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: 
Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: -- 51 and val_51 in the table so skewed data for 51 and val_14 should be none --- but query should succeed for 51 or 51 and val_14 -select * from srcpart where ds = '2008-04-08' and key = '51' +POSTHOOK: query: select * from srcpart where ds = '2008-04-08' and key = '51' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -473,15 +435,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 #### A masked pattern was here #### -PREHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning -select count(1) from srcpart where ds = '2008-04-08' and key < '51' +PREHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key < '51' PREHOOK: type: QUERY PREHOOK: Input: default@srcpart PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12 #### A masked pattern was here #### -POSTHOOK: query: -- queries with < <= > >= should work for skewed test although we don't benefit from pruning -select count(1) from srcpart where ds = '2008-04-08' and key < '51' +POSTHOOK: query: select count(1) from srcpart where ds = '2008-04-08' and key < '51' POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11 @@ -571,13 +531,11 @@ POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Input: default@list_bucketing_static_part@ds=2008-04-08/hr=11 #### A masked pattern was here #### 90 -PREHOOK: query: -- clean up -drop table list_bucketing_static_part +PREHOOK: query: drop table list_bucketing_static_part PREHOOK: type: DROPTABLE PREHOOK: Input: default@list_bucketing_static_part PREHOOK: Output: default@list_bucketing_static_part 
-POSTHOOK: query: -- clean up -drop table list_bucketing_static_part +POSTHOOK: query: drop table list_bucketing_static_part POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@list_bucketing_static_part POSTHOOK: Output: default@list_bucketing_static_part diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out index e146675..5bf7f28 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part1.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out index ac4ca2c..5ec6d68 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part10.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out index d47ee53..ce65e71 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part14.q.out @@ -1,19 +1,9 @@ -PREHOOK: query: -- EXCLUDE_OS_WINDOWS --- 
excluded on windows because of difference in file name encoding logic - --- SORT_QUERY_RESULTS - -create table if not exists nzhang_part14 (key string) +PREHOOK: query: create table if not exists nzhang_part14 (key string) partitioned by (value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_part14 -POSTHOOK: query: -- EXCLUDE_OS_WINDOWS --- excluded on windows because of difference in file name encoding logic - --- SORT_QUERY_RESULTS - -create table if not exists nzhang_part14 (key string) +POSTHOOK: query: create table if not exists nzhang_part14 (key string) partitioned by (value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out index 9177290..d066b3a 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part2.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table if not exists nzhang_part_bucket (key string, value string) +PREHOOK: query: create table if not exists nzhang_part_bucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@nzhang_part_bucket -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table if not exists nzhang_part_bucket (key string, value string) +POSTHOOK: query: create table if not exists nzhang_part_bucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 10 buckets POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out index d9ec87f..d120963 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out +++ 
b/ql/src/test/results/clientpositive/spark/load_dyn_part3.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out index 53bb111..7ec76b5 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part4.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out index ee70e71..c4e2740 100644 --- a/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part8.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out b/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out index c9790d1..55bcfae 100644 --- 
a/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out +++ b/ql/src/test/results/clientpositive/spark/load_dyn_part9.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/spark/load_fs2.q.out b/ql/src/test/results/clientpositive/spark/load_fs2.q.out index 1846542..aef25ce 100644 --- a/ql/src/test/results/clientpositive/spark/load_fs2.q.out +++ b/ql/src/test/results/clientpositive/spark/load_fs2.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- HIVE-3300 [jira] LOAD DATA INPATH fails if a hdfs file with same name is added to table --- 'loader' table is used only for uploading kv1.txt to HDFS (!hdfs -put is not working on minMRDriver) - -create table result (key string, value string) +PREHOOK: query: create table result (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@result -POSTHOOK: query: -- HIVE-3300 [jira] LOAD DATA INPATH fails if a hdfs file with same name is added to table --- 'loader' table is used only for uploading kv1.txt to HDFS (!hdfs -put is not working on minMRDriver) - -create table result (key string, value string) +POSTHOOK: query: create table result (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@result diff --git a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out index 6cfde3f..473fe1c 100644 --- a/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/louter_join_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- 
SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM src a LEFT OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN EXTENDED SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM src a LEFT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/spark/mapjoin1.q.out b/ql/src/test/results/clientpositive/spark/mapjoin1.q.out index 9027bf4..ca489e4 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin1.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin1.q.out @@ -21,12 +21,10 @@ POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11 POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12 #### A masked pattern was here #### 1114788.0 -PREHOOK: query: -- const filter on outer join -EXPLAIN +PREHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN src b on a.key=b.key AND true limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- const filter on outer join -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN src b on a.key=b.key AND true limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -117,12 +115,10 @@ POSTHOOK: Input: default@src 165 val_165 165 val_165 165 val_165 165 val_165 409 val_409 409 val_409 -PREHOOK: query: -- func filter on outer join -EXPLAIN +PREHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN src b on a.key=b.key AND b.key * 10 < '1000' limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- func filter on outer join -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN src b on a.key=b.key AND b.key * 10 < '1000' limit 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -213,13 +209,11 @@ NULL NULL 255 val_255 NULL NULL 278 val_278 98 val_98 98 val_98 98 val_98 98 val_98 -PREHOOK: query: -- field filter on outer join -EXPLAIN +PREHOOK: query: EXPLAIN 
SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN (select key, named_struct('key', key, 'value', value) as kv from src) b on a.key=b.key AND b.kv.key > 200 limit 10 PREHOOK: type: QUERY -POSTHOOK: query: -- field filter on outer join -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+ MAPJOIN(a) */ * FROM src a RIGHT OUTER JOIN (select key, named_struct('key', key, 'value', value) as kv from src) b on a.key=b.key AND b.kv.key > 200 limit 10 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out index 4d02c58..f9b9843 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_decimal.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE over1k(t tinyint, +PREHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, @@ -16,9 +14,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@over1k -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE over1k(t tinyint, +POSTHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_filter_on_outerjoin.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_filter_on_outerjoin.q.out index 666c8d2..9be4c29 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_filter_on_outerjoin.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_filter_on_outerjoin.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition - -SELECT * FROM src1 +PREHOOK: query: SELECT * FROM src1 RIGHT OUTER JOIN src1 src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) JOIN src src3 ON (src2.key = src3.key AND src3.key < 300) SORT BY src1.key, src2.key, src3.key @@ -10,11 +6,7 @@ PREHOOK: 
type: QUERY PREHOOK: Input: default@src PREHOOK: Input: default@src1 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition - -SELECT * FROM src1 +POSTHOOK: query: SELECT * FROM src1 RIGHT OUTER JOIN src1 src2 ON (src1.key = src2.key AND src1.key < 10 AND src2.key > 10) JOIN src src3 ON (src2.key = src3.key AND src3.key < 300) SORT BY src1.key, src2.key, src3.key diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out index 3a86b39..bf829d9 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_mapjoin.q.out @@ -1,14 +1,6 @@ -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - --- SORT_QUERY_RESULTS - -explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) +PREHOOK: query: explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - --- SORT_QUERY_RESULTS - -explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) +POSTHOOK: query: explain extended select srcpart.key from srcpart join src on (srcpart.value=src.value) join src1 on (srcpart.key=src1.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_memcheck.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_memcheck.q.out index 3ea30e2..c4997e9 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_memcheck.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_memcheck.q.out @@ -1,12 +1,8 @@ 
-PREHOOK: query: -- SORT_QUERY_RESULTS - -create table src0 like src +PREHOOK: query: create table src0 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src0 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table src0 like src +POSTHOOK: query: create table src0 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src0 diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out index 035bfc5..9416f6e 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_subquery.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 FROM src1 x JOIN src y ON (x.key = y.key)) subq JOIN srcpart z ON (subq.key1 = z.key and z.ds='2008-04-08' and z.hr=11) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT subq.key1, z.value FROM (SELECT x.key as key1, x.value as value1, y.key as key2, y.value as value2 diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_subquery2.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_subquery2.q.out index 191eeab..a368270 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_subquery2.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_subquery2.q.out @@ -64,18 +64,14 @@ POSTHOOK: query: load data local inpath '../../data/files/z.txt' INTO TABLE z POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@z -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to 
mapjoin - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT subq.key1, subq.value1, subq.key2, subq.value2, z.id, z.name FROM (SELECT x.id as key1, x.name as value1, y.id as key2, y.name as value2 FROM y JOIN x ON (x.id = y.id)) subq JOIN z ON (subq.key1 = z.id) PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT subq.key1, subq.value1, subq.key2, subq.value2, z.id, z.name FROM (SELECT x.id as key1, x.name as value1, y.id as key2, y.name as value2 diff --git a/ql/src/test/results/clientpositive/spark/mapjoin_test_outer.q.out b/ql/src/test/results/clientpositive/spark/mapjoin_test_outer.q.out index 8f30fad..c66e689 100644 --- a/ql/src/test/results/clientpositive/spark/mapjoin_test_outer.q.out +++ b/ql/src/test/results/clientpositive/spark/mapjoin_test_outer.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition - -create table dest_1 (key STRING, value STRING) stored as textfile +PREHOOK: query: create table dest_1 (key STRING, value STRING) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-2101 mapjoin sometimes gives wrong results if there is a filter in the on condition - -create table dest_1 (key STRING, value STRING) stored as textfile +POSTHOOK: query: create table dest_1 (key STRING, value STRING) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_1 diff --git a/ql/src/test/results/clientpositive/spark/merge2.q.out b/ql/src/test/results/clientpositive/spark/merge2.q.out index 63274eb..d780dc2 100644 --- a/ql/src/test/results/clientpositive/spark/merge2.q.out +++ b/ql/src/test/results/clientpositive/spark/merge2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table 
test1(key int, val int) +PREHOOK: query: create table test1(key int, val int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table test1(key int, val int) +POSTHOOK: query: create table test1(key int, val int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test1 diff --git a/ql/src/test/results/clientpositive/spark/mergejoins.q.out b/ql/src/test/results/clientpositive/spark/mergejoins.q.out index 5037998..a3f8f8d 100644 --- a/ql/src/test/results/clientpositive/spark/mergejoins.q.out +++ b/ql/src/test/results/clientpositive/spark/mergejoins.q.out @@ -188,11 +188,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: --HIVE-3070 filter on outer join condition removed while merging join tree -explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10 +PREHOOK: query: explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10 PREHOOK: type: QUERY -POSTHOOK: query: --HIVE-3070 filter on outer join condition removed while merging join tree -explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10 +POSTHOOK: query: explain select * from src a join src b on a.key=b.key left outer join src c on b.key=c.key and b.key<10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/mergejoins_mixed.q.out b/ql/src/test/results/clientpositive/spark/mergejoins_mixed.q.out index 9e7c1e5..ebea012 100644 --- a/ql/src/test/results/clientpositive/spark/mergejoins_mixed.q.out +++ b/ql/src/test/results/clientpositive/spark/mergejoins_mixed.q.out @@ -1,21 +1,15 @@ -PREHOOK: query: -- HIVE-3464 - -create table a (key string, value string) +PREHOOK: query: create table a (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default 
PREHOOK: Output: default@a -POSTHOOK: query: -- HIVE-3464 - -create table a (key string, value string) +POSTHOOK: query: create table a (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@a -PREHOOK: query: -- (a-b-c-d) -explain +PREHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key) PREHOOK: type: QUERY -POSTHOOK: query: -- (a-b-c-d) -explain +POSTHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.key=c.key) left outer join a d on (a.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -465,12 +459,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- ((a-b-d)-c) (reordered) -explain +PREHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key) PREHOOK: type: QUERY -POSTHOOK: query: -- ((a-b-d)-c) (reordered) -explain +POSTHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (a.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -860,12 +852,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- (((a-b)-c)-d) -explain +PREHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key) PREHOOK: type: QUERY -POSTHOOK: query: -- (((a-b)-c)-d) -explain +POSTHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) right outer join a d on (a.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -1426,12 +1416,10 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- ((a-b)-c-d) -explain +PREHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (c.key=d.key) PREHOOK: type: QUERY -POSTHOOK: query: -- ((a-b)-c-d) 
-explain +POSTHOOK: query: explain select * from a join a b on (a.key=b.key) left outer join a c on (b.value=c.key) left outer join a d on (c.key=d.key) POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/multi_insert.q.out b/ql/src/test/results/clientpositive/spark/multi_insert.q.out index b373236..33af962 100644 --- a/ql/src/test/results/clientpositive/spark/multi_insert.q.out +++ b/ql/src/test/results/clientpositive/spark/multi_insert.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table src_multi1 like src +PREHOOK: query: create table src_multi1 like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_multi1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table src_multi1 like src +POSTHOOK: query: create table src_multi1 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_multi1 diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out index d5de394..d8c4b7f 100644 --- a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out +++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables -create table e1 (key string, count int) +PREHOOK: query: create table e1 (key string, count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@e1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - ---HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables -create table e1 (key string, count int) +POSTHOOK: query: create table e1 (key string, count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 diff --git 
a/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out index 965c5a1..81b882a 100644 --- a/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out +++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: --HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables -create table e1 (count int) +PREHOOK: query: create table e1 (count int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@e1 -POSTHOOK: query: --HIVE-3699 Multiple insert overwrite into multiple tables query stores same results in all tables -create table e1 (count int) +POSTHOOK: query: create table e1 (count int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out index 588d54a..17b7406 100644 --- a/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out +++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby3.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -create table e1 (key string, keyD double) +PREHOOK: query: create table e1 (key string, keyD double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@e1 -POSTHOOK: query: -- SORT_QUERY_RESULTS -create table e1 (key string, keyD double) +POSTHOOK: query: create table e1 (key string, keyD double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@e1 diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out index c3a3511..dd3fa50 100644 --- a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out +++ 
b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table src_10 as select * from src limit 10 +PREHOOK: query: create table src_10 as select * from src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src_10 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table src_10 as select * from src limit 10 +POSTHOOK: query: create table src_10 as select * from src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -38,22 +34,12 @@ POSTHOOK: query: create table src_lv3 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_lv3 -PREHOOK: query: -- 2LV --- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-FS[12] --- -SEL[3]-UDTF[4]-LVJ[5] --- -LVF[6]-SEL[7]-LVJ[10]-SEL[13]-FS[14] --- -SEL[8]-UDTF[9]-LVJ[10] -explain +PREHOOK: query: explain from src_10 insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C PREHOOK: type: QUERY -POSTHOOK: query: -- 2LV --- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-FS[12] --- -SEL[3]-UDTF[4]-LVJ[5] --- -LVF[6]-SEL[7]-LVJ[10]-SEL[13]-FS[14] --- -SEL[8]-UDTF[9]-LVJ[10] -explain +POSTHOOK: query: explain from src_10 insert overwrite table src_lv1 select key, C lateral view explode(array(key+1, key+2)) A as C insert overwrite table src_lv2 select key, C lateral view explode(array(key+3, key+4)) A as C @@ -262,22 +248,12 @@ POSTHOOK: Input: default@src_lv2 86 90.0 98 101.0 98 102.0 -PREHOOK: query: -- 2(LV+GBY) --- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16] --- -SEL[3]-UDTF[4]-LVJ[5] --- -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22] --- -SEL[8]-UDTF[9]-LVJ[10] -explain +PREHOOK: query: 
explain from src_10 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key PREHOOK: type: QUERY -POSTHOOK: query: -- 2(LV+GBY) --- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16] --- -SEL[3]-UDTF[4]-LVJ[5] --- -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22] --- -SEL[8]-UDTF[9]-LVJ[10] -explain +POSTHOOK: query: explain from src_10 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, sum(C) lateral view explode(array(key+3, key+4)) A as C group by key @@ -506,23 +482,13 @@ POSTHOOK: Input: default@src_lv2 484 975.0 86 179.0 98 203.0 -PREHOOK: query: -- (LV+GBY) + RS:2GBY --- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[6]-GBY[7]-RS[8]-GBY[9]-SEL[10]-FS[11] --- -SEL[3]-UDTF[4]-LVJ[5] --- -FIL[12]-SEL[13]-RS[14]-FOR[15]-FIL[16]-GBY[17]-SEL[18]-FS[19] --- -FIL[20]-GBY[21]-SEL[22]-FS[23] -explain +PREHOOK: query: explain from src_10 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, count(value) where key > 200 group by key insert overwrite table src_lv3 select key, count(value) where key < 200 group by key PREHOOK: type: QUERY -POSTHOOK: query: -- (LV+GBY) + RS:2GBY --- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[6]-GBY[7]-RS[8]-GBY[9]-SEL[10]-FS[11] --- -SEL[3]-UDTF[4]-LVJ[5] --- -FIL[12]-SEL[13]-RS[14]-FOR[15]-FIL[16]-GBY[17]-SEL[18]-FS[19] --- -FIL[20]-GBY[21]-SEL[22]-FS[23] -explain +POSTHOOK: query: explain from src_10 insert overwrite table src_lv1 select key, sum(C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, count(value) where key > 200 group by key @@ -769,27 +735,13 @@ POSTHOOK: Input: default@src_lv3 27 1 86 1 
98 1 -PREHOOK: query: -- todo: shared distinct columns (should work with hive.optimize.multigroupby.common.distincts) --- 2(LV+GBY) + RS:2GBY --- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16] --- -SEL[3]-UDTF[4]-LVJ[5] --- -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22] --- -SEL[8]-UDTF[9]-LVJ[10] --- -SEL[23]-GBY[24]-RS[25]-GBY[26]-SEL[27]-FS[28] -explain +PREHOOK: query: explain from src_10 insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C insert overwrite table src_lv3 select value, sum(distinct key) group by value PREHOOK: type: QUERY -POSTHOOK: query: -- todo: shared distinct columns (should work with hive.optimize.multigroupby.common.distincts) --- 2(LV+GBY) + RS:2GBY --- TS[0]-LVF[1]-SEL[2]-LVJ[5]-SEL[11]-GBY[12]-RS[13]-GBY[14]-SEL[15]-FS[16] --- -SEL[3]-UDTF[4]-LVJ[5] --- -LVF[6]-SEL[7]-LVJ[10]-SEL[17]-GBY[18]-RS[19]-GBY[20]-SEL[21]-FS[22] --- -SEL[8]-UDTF[9]-LVJ[10] --- -SEL[23]-GBY[24]-RS[25]-GBY[26]-SEL[27]-FS[28] -explain +POSTHOOK: query: explain from src_10 insert overwrite table src_lv1 select C, sum(distinct key) lateral view explode(array(key+1, key+2)) A as C group by C insert overwrite table src_lv2 select C, sum(distinct key) lateral view explode(array(key+3, key+4)) A as C group by C @@ -1135,16 +1087,14 @@ POSTHOOK: query: create table src_lv4 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_lv4 -PREHOOK: query: -- Common distincts optimization works across non-lateral view queries, but not across lateral view multi inserts -explain +PREHOOK: query: explain from src_10 insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, sum(distinct 
C) lateral view explode(array(key+3, key+4)) A as C group by key insert overwrite table src_lv3 select value, sum(distinct key) where key > 200 group by value insert overwrite table src_lv4 select value, sum(distinct key) where key < 200 group by value PREHOOK: type: QUERY -POSTHOOK: query: -- Common distincts optimization works across non-lateral view queries, but not across lateral view multi inserts -explain +POSTHOOK: query: explain from src_10 insert overwrite table src_lv1 select key, sum(distinct C) lateral view explode(array(key+1, key+2)) A as C group by key insert overwrite table src_lv2 select key, sum(distinct C) lateral view explode(array(key+3, key+4)) A as C group by key diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out index bc9473a..9d90d93 100644 --- a/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out +++ b/ql/src/test/results/clientpositive/spark/multi_insert_mixed.q.out @@ -22,19 +22,13 @@ POSTHOOK: query: create table src_multi3 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_multi3 -PREHOOK: query: -- Testing the case where a map work contains both shuffling (ReduceSinkOperator) --- and inserting to output table (FileSinkOperator). - -explain +PREHOOK: query: explain from src insert overwrite table src_multi1 select key, count(1) group by key order by key insert overwrite table src_multi2 select value, count(1) group by value order by value insert overwrite table src_multi3 select * where key < 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Testing the case where a map work contains both shuffling (ReduceSinkOperator) --- and inserting to output table (FileSinkOperator). 
- -explain +POSTHOOK: query: explain from src insert overwrite table src_multi1 select key, count(1) group by key order by key insert overwrite table src_multi2 select value, count(1) group by value order by value diff --git a/ql/src/test/results/clientpositive/spark/multi_join_union.q.out b/ql/src/test/results/clientpositive/spark/multi_join_union.q.out index 52b3c74..513be62 100644 --- a/ql/src/test/results/clientpositive/spark/multi_join_union.q.out +++ b/ql/src/test/results/clientpositive/spark/multi_join_union.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src11 as SELECT * FROM src +PREHOOK: query: CREATE TABLE src11 as SELECT * FROM src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src11 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src11 as SELECT * FROM src +POSTHOOK: query: CREATE TABLE src11 as SELECT * FROM src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out b/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out index fd1fa8b..ee6883f 100644 --- a/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out +++ b/ql/src/test/results/clientpositive/spark/optimize_nullscan.q.out @@ -1,17 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- Disable CBO here, because it messes with the cases specifically crafted for the optimizer. --- Instead, we could improve the optimizer to recognize more cases, e.g. filter before join. - -explain extended +PREHOOK: query: explain extended select key from src where false PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- Disable CBO here, because it messes with the cases specifically crafted for the optimizer. --- Instead, we could improve the optimizer to recognize more cases, e.g. filter before join. 
- -explain extended +POSTHOOK: query: explain extended select key from src where false POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/orc_merge1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge1.q.out index 86df0a7..1407616 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge1.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge1.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +POSTHOOK: query: DROP TABLE orcfile_merge1 POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE orcfile_merge1b PREHOOK: type: DROPTABLE @@ -44,14 +40,12 @@ POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orcfile_merge1c -PREHOOK: query: -- merge disabled -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- merge disabled -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -119,14 +113,12 @@ POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)sr POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 2 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge slow way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge slow way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION 
(ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -244,14 +236,12 @@ POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge fast way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge fast way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -361,8 +351,7 @@ POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t @@ -371,8 +360,7 @@ PREHOOK: Input: default@orcfile_merge1 PREHOOK: Input: default@orcfile_merge1@ds=1/part=0 PREHOOK: Input: default@orcfile_merge1@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t diff --git a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out index 83721f5..1f8c869 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge5.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge5.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_merge5 (userid 
bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 @@ -26,11 +22,9 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@orc_merge5 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -90,13 +84,11 @@ POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchem POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5b.userid SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 3 files total -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 3 files total -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b @@ -113,11 +105,9 @@ POSTHOOK: Input: default@orc_merge5b 13 bar 80.0 2 1969-12-31 16:00:05 2 foo 0.8 1 1969-12-31 16:00:00 5 eat 0.8 6 1969-12-31 16:00:20 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -219,13 +209,11 @@ POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchem POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table 
orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b @@ -309,13 +297,11 @@ POSTHOOK: query: alter table orc_merge5b concatenate POSTHOOK: type: ALTER_TABLE_MERGE POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b diff --git a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out index b9b3960..be62fae 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge6.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge6.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- orc file merge tests for static partitions -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- orc file merge tests for static partitions -create table orc_merge5 (userid bigint, string1 string, subtype 
double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 @@ -28,11 +22,9 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@orc_merge5 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -108,14 +100,12 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 3 files total -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a 
partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: -- 3 files total -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a @@ -160,11 +150,9 @@ POSTHOOK: Input: default@orc_merge5a@year=2001/hour=24 2 foo 0.8 1 1969-12-31 16:00:00 2001 24 5 eat 0.8 6 1969-12-31 16:00:20 2000 24 5 eat 0.8 6 1969-12-31 16:00:20 2001 24 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5a partition (year="2000",hour=24) select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -282,14 +270,12 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).string1 SIMPLE [(orc POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(year=2001,hour=24).userid SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a @@ -454,14 +440,12 @@ POSTHOOK: query: alter table orc_merge5a partition(year="2001",hour=24) concaten POSTHOOK: type: ALTER_PARTITION_MERGE POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@year=2001/hour=24 -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@year=2000/hour=24 -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(year="2000",hour=24) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a diff --git a/ql/src/test/results/clientpositive/spark/orc_merge7.q.out b/ql/src/test/results/clientpositive/spark/orc_merge7.q.out index 6c8bcfa..01e3eac 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge7.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge7.q.out 
@@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- orc merge file tests for dynamic partition case - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- orc merge file tests for dynamic partition case - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 @@ -30,11 +22,9 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@orc_merge5 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 +PREHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 +POSTHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -141,14 +131,12 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)or POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).subtype SIMPLE 
[(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 3 files total -analyze table orc_merge5a partition(st=80.0) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 -POSTHOOK: query: -- 3 files total -analyze table orc_merge5a partition(st=80.0) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a @@ -196,11 +184,9 @@ POSTHOOK: Input: default@orc_merge5a@st=80.0 13 bar 80.0 2 1969-12-31 16:00:05 80.0 2 foo 0.8 1 1969-12-31 16:00:00 0.8 5 eat 0.8 6 1969-12-31 16:00:20 0.8 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 +PREHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 PREHOOK: type: QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 +POSTHOOK: query: explain insert overwrite table orc_merge5a partition (st) select userid,string1,subtype,decimal1,ts,subtype from orc_merge5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -349,14 +335,12 @@ POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).string1 SIMPLE [(orc_merge5)or POSTHOOK: Lineage: orc_merge5a 
PARTITION(st=80.0).subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5a PARTITION(st=80.0).userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(st=80.0) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(st=80.0) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a @@ -562,14 +546,12 @@ POSTHOOK: query: alter table orc_merge5a partition(st=0.8) concatenate POSTHOOK: type: ALTER_PARTITION_MERGE POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a@st=0.8 -PREHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(st=80.0) compute statistics noscan +PREHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5a PREHOOK: Output: default@orc_merge5a PREHOOK: Output: default@orc_merge5a@st=80.0 -POSTHOOK: query: -- 1 file after merging -analyze table orc_merge5a partition(st=80.0) compute statistics noscan +POSTHOOK: query: analyze table orc_merge5a partition(st=80.0) compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5a POSTHOOK: Output: default@orc_merge5a diff --git a/ql/src/test/results/clientpositive/spark/orc_merge9.q.out 
b/ql/src/test/results/clientpositive/spark/orc_merge9.q.out index bdf0fd3..28e51b2 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge9.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge9.q.out @@ -64,15 +64,11 @@ POSTHOOK: Input: default@ts_merge 50000 Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- incompatible merge test (stripe statistics missing) - -create table a_merge like alltypesorc +PREHOOK: query: create table a_merge like alltypesorc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@a_merge -POSTHOOK: query: -- incompatible merge test (stripe statistics missing) - -create table a_merge like alltypesorc +POSTHOOK: query: create table a_merge like alltypesorc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@a_merge diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out index 86df0a7..1407616 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge_diff_fs.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +PREHOOK: query: DROP TABLE orcfile_merge1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE orcfile_merge1 +POSTHOOK: query: DROP TABLE orcfile_merge1 POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE orcfile_merge1b PREHOOK: type: DROPTABLE @@ -44,14 +40,12 @@ POSTHOOK: query: CREATE TABLE orcfile_merge1c (key INT, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orcfile_merge1c -PREHOOK: query: -- merge disabled -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- merge disabled -EXPLAIN +POSTHOOK: query: 
EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1 PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -119,14 +113,12 @@ POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).key EXPRESSION [(src)sr POSTHOOK: Lineage: orcfile_merge1 PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 2 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge slow way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge slow way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1b PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -244,14 +236,12 @@ POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1b PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- auto-merge fast way -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- auto-merge fast way -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE orcfile_merge1c PARTITION (ds='1', part) SELECT key, value, PMOD(HASH(key), 2) as part FROM src @@ -361,8 +351,7 @@ POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).key EXPRESSION [(src)s POSTHOOK: Lineage: orcfile_merge1c PARTITION(ds=1,part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] Found 1 items #### A masked pattern was here #### -PREHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +PREHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t @@ 
-371,8 +360,7 @@ PREHOOK: Input: default@orcfile_merge1 PREHOOK: Input: default@orcfile_merge1@ds=1/part=0 PREHOOK: Input: default@orcfile_merge1@ds=1/part=1 #### A masked pattern was here #### -POSTHOOK: query: -- Verify -SELECT SUM(HASH(c)) FROM ( +POSTHOOK: query: SELECT SUM(HASH(c)) FROM ( SELECT TRANSFORM(*) USING 'tr \t _' AS (c) FROM orcfile_merge1 WHERE ds='1' ) t diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out index d092e6a..65790c4 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 @@ -26,11 +22,9 @@ POSTHOOK: query: load data local inpath '../../data/files/orc_split_elim.orc' in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@orc_merge5 -PREHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +PREHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 PREHOOK: type: 
QUERY -POSTHOOK: query: -- 3 mappers -explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 +POSTHOOK: query: explain insert overwrite table orc_merge5b select userid,string1,subtype,decimal1,ts from orc_merge5 where userid<=13 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -154,13 +148,11 @@ POSTHOOK: Lineage: orc_merge5b.string1 SIMPLE [(orc_merge5)orc_merge5.FieldSchem POSTHOOK: Lineage: orc_merge5b.subtype SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:subtype, type:double, comment:null), ] POSTHOOK: Lineage: orc_merge5b.ts SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:ts, type:timestamp, comment:null), ] POSTHOOK: Lineage: orc_merge5b.userid SIMPLE [(orc_merge5)orc_merge5.FieldSchema(name:userid, type:bigint, comment:null), ] -PREHOOK: query: -- 5 files total -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 5 files total -analyze table orc_merge5b compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b @@ -200,13 +192,11 @@ POSTHOOK: query: alter table orc_merge5b concatenate POSTHOOK: type: ALTER_TABLE_MERGE POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b -PREHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind -analyze table orc_merge5b compute statistics noscan +PREHOOK: query: analyze table orc_merge5b compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@orc_merge5b PREHOOK: Output: default@orc_merge5b -POSTHOOK: query: -- 3 file after merging - all 0.12 format files will be merged and 0.11 files will be left behind -analyze table orc_merge5b 
compute statistics noscan +POSTHOOK: query: analyze table orc_merge5b compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_merge5b POSTHOOK: Output: default@orc_merge5b diff --git a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out index 90a8f59..52973c8 100644 --- a/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out +++ b/ql/src/test/results/clientpositive/spark/orc_merge_incompat2.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- orc merge file tests for dynamic partition case - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +PREHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_merge5 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- orc merge file tests for dynamic partition case - -create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc +POSTHOOK: query: create table orc_merge5 (userid bigint, string1 string, subtype double, decimal1 decimal, ts timestamp) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_merge5 diff --git a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out index 360abc9..836816b 100644 --- a/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/outer_join_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM src a FULL OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN EXTENDED SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND 
b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM src a FULL OUTER JOIN diff --git a/ql/src/test/results/clientpositive/spark/parallel.q.out b/ql/src/test/results/clientpositive/spark/parallel.q.out index 4d05fac..e31fcf0 100644 --- a/ql/src/test/results/clientpositive/spark/parallel.q.out +++ b/ql/src/test/results/clientpositive/spark/parallel.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table if not exists src_a like src +PREHOOK: query: create table if not exists src_a like src PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_a -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table if not exists src_a like src +POSTHOOK: query: create table if not exists src_a like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src_a diff --git a/ql/src/test/results/clientpositive/spark/parallel_join0.q.out b/ql/src/test/results/clientpositive/spark/parallel_join0.q.out index 4989135..340d460 100644 --- a/ql/src/test/results/clientpositive/spark/parallel_join0.q.out +++ b/ql/src/test/results/clientpositive/spark/parallel_join0.q.out @@ -1,7 +1,5 @@ Warning: Shuffle Join JOIN[8][tables = [src1, src2]] in Work 'Reducer 2' is a cross product -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 @@ -9,9 +7,7 @@ SELECT src1.key as k1, src1.value as v1, (SELECT * FROM src WHERE src.key < 10) src2 SORT BY k1, v1, k2, v2 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.key as k1, src1.value as v1, src2.key as k2, src2.value as v2 FROM (SELECT * FROM src WHERE src.key < 10) src1 diff --git a/ql/src/test/results/clientpositive/spark/parallel_join1.q.out 
b/ql/src/test/results/clientpositive/spark/parallel_join1.q.out index 1177940..7fdd48d 100644 --- a/ql/src/test/results/clientpositive/spark/parallel_join1.q.out +++ b/ql/src/test/results/clientpositive/spark/parallel_join1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_j1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_j1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_j1 diff --git a/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out b/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out index 53f3164..483e42d 100644 --- a/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out +++ b/ql/src/test/results/clientpositive/spark/parallel_orderby.q.out @@ -190,15 +190,11 @@ POSTHOOK: Input: default@total_ordered 86 val_86 98 val_98 98 val_98 -PREHOOK: query: -- rolling back to single task in case that the number of sample is not enough - -drop table total_ordered +PREHOOK: query: drop table total_ordered PREHOOK: type: DROPTABLE PREHOOK: Input: default@total_ordered PREHOOK: Output: default@total_ordered -POSTHOOK: query: -- rolling back to single task in case that the number of sample is not enough - -drop table total_ordered +POSTHOOK: query: drop table total_ordered POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@total_ordered POSTHOOK: Output: default@total_ordered diff --git a/ql/src/test/results/clientpositive/spark/parquet_join.q.out b/ql/src/test/results/clientpositive/spark/parquet_join.q.out index a8d98e8..e4afb15 100644 --- a/ql/src/test/results/clientpositive/spark/parquet_join.q.out +++ 
b/ql/src/test/results/clientpositive/spark/parquet_join.q.out @@ -61,17 +61,9 @@ POSTHOOK: Output: default@parquet_jointable2 POSTHOOK: Lineage: parquet_jointable2.c1 EXPRESSION [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: parquet_jointable2.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: parquet_jointable2.myvalue EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - --- MR join - -explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +PREHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- MR join - -explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +POSTHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -159,21 +151,9 @@ POSTHOOK: Input: default@parquet_jointable2 #### A masked pattern was here #### val_0value val_10value -PREHOOK: query: -- The two tables involved in the join have differing number of columns(table1-2,table2-3). 
In case of Map and SMB join, --- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table --- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to second/current table - --- Map join - -explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +PREHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The two tables involved in the join have differing number of columns(table1-2,table2-3). In case of Map and SMB join, --- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table --- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to second/current table - --- Map join - -explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +POSTHOOK: query: explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage @@ -262,15 +242,11 @@ POSTHOOK: Input: default@parquet_jointable2 #### A masked pattern was here #### val_0value val_10value -PREHOOK: query: -- SMB join - -create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet +PREHOOK: query: create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@parquet_jointable1_bucketed_sorted -POSTHOOK: query: -- SMB join - -create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key 
ASC) INTO 1 BUCKETS stored as parquet +POSTHOOK: query: create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@parquet_jointable1_bucketed_sorted diff --git a/ql/src/test/results/clientpositive/spark/pcr.q.out b/ql/src/test/results/clientpositive/spark/pcr.q.out index 69f2643..9ff663f 100644 --- a/ql/src/test/results/clientpositive/spark/pcr.q.out +++ b/ql/src/test/results/clientpositive/spark/pcr.q.out @@ -4414,11 +4414,9 @@ POSTHOOK: query: drop table pcr_t3 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@pcr_t3 POSTHOOK: Output: default@pcr_t3 -PREHOOK: query: -- Test cases when a non-boolean ds expression has same and different values for all possible ds values: -drop table pcr_foo +PREHOOK: query: drop table pcr_foo PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Test cases when a non-boolean ds expression has same and different values for all possible ds values: -drop table pcr_foo +POSTHOOK: query: drop table pcr_foo POSTHOOK: type: DROPTABLE PREHOOK: query: create table pcr_foo (key int, value string) partitioned by (ds int) PREHOOK: type: CREATETABLE @@ -4458,16 +4456,14 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@pcr_foo@ds=7 POSTHOOK: Lineage: pcr_foo PARTITION(ds=7).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: pcr_foo PARTITION(ds=7).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- the condition is 'true' for all the 3 partitions (ds=3,5,7): -select key, value, ds from pcr_foo where (ds % 2 == 1) +PREHOOK: query: select key, value, ds from pcr_foo where (ds % 2 == 1) PREHOOK: type: QUERY PREHOOK: Input: default@pcr_foo PREHOOK: Input: default@pcr_foo@ds=3 PREHOOK: Input: default@pcr_foo@ds=5 PREHOOK: Input: default@pcr_foo@ds=7 #### A masked pattern was here 
#### -POSTHOOK: query: -- the condition is 'true' for all the 3 partitions (ds=3,5,7): -select key, value, ds from pcr_foo where (ds % 2 == 1) +POSTHOOK: query: select key, value, ds from pcr_foo where (ds % 2 == 1) POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_foo POSTHOOK: Input: default@pcr_foo@ds=3 @@ -4504,15 +4500,13 @@ POSTHOOK: Input: default@pcr_foo@ds=7 5 val_5 7 8 val_8 7 9 val_9 7 -PREHOOK: query: -- the condition is 'true' for partitions (ds=3,5) but 'false' of partition ds=7: -select key, value, ds from pcr_foo where (ds / 3 < 2) +PREHOOK: query: select key, value, ds from pcr_foo where (ds / 3 < 2) PREHOOK: type: QUERY PREHOOK: Input: default@pcr_foo PREHOOK: Input: default@pcr_foo@ds=3 PREHOOK: Input: default@pcr_foo@ds=5 #### A masked pattern was here #### -POSTHOOK: query: -- the condition is 'true' for partitions (ds=3,5) but 'false' of partition ds=7: -select key, value, ds from pcr_foo where (ds / 3 < 2) +POSTHOOK: query: select key, value, ds from pcr_foo where (ds / 3 < 2) POSTHOOK: type: QUERY POSTHOOK: Input: default@pcr_foo POSTHOOK: Input: default@pcr_foo@ds=3 @@ -4546,18 +4540,14 @@ POSTHOOK: query: drop table pcr_foo POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@pcr_foo POSTHOOK: Output: default@pcr_foo -PREHOOK: query: -- Cover org.apache.hadoop.hive.ql.optimizer.pcr.PcrExprProcFactory.FieldExprProcessor. --- Create a table with a struct data: -create table ab(strct struct) +PREHOOK: query: create table ab(strct struct) row format delimited fields terminated by '\t' collection items terminated by '\001' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@ab -POSTHOOK: query: -- Cover org.apache.hadoop.hive.ql.optimizer.pcr.PcrExprProcFactory.FieldExprProcessor. 
--- Create a table with a struct data: -create table ab(strct struct) +POSTHOOK: query: create table ab(strct struct) row format delimited fields terminated by '\t' collection items terminated by '\001' @@ -4574,11 +4564,9 @@ overwrite into table ab POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@ab -PREHOOK: query: -- Create partitioned table with struct data: -drop table foo_field +PREHOOK: query: drop table foo_field PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Create partitioned table with struct data: -drop table foo_field +POSTHOOK: query: drop table foo_field POSTHOOK: type: DROPTABLE PREHOOK: query: create table foo_field (s struct) partitioned by (ds int) PREHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out b/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out index 1866e37..1610847 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_gby_join.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, count(1) FROM (SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1 @@ -10,9 +8,7 @@ ON src1.c1 = src2.c3 AND src1.c1 < '400' WHERE src1.c1 > '20' AND (src1.c2 < 'val_50' OR src1.c1 > '2') AND (src2.c3 > '50' OR src1.c1 < '50') AND (src2.c3 <> '4') GROUP BY src1.c1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, count(1) FROM (SELECT src.key AS c1, src.value AS c2 from src where src.key > '1' ) src1 diff --git a/ql/src/test/results/clientpositive/spark/ppd_join.q.out b/ql/src/test/results/clientpositive/spark/ppd_join.q.out index aed4800..85cd8c0 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_join.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_join.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: 
EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1 @@ -9,9 +7,7 @@ JOIN ON src1.c1 = src2.c3 AND src1.c1 < '400' WHERE src1.c1 > '20' and (src1.c2 < 'val_50' or src1.c1 > '2') and (src2.c3 > '50' or src1.c1 < '50') and (src2.c3 <> '4') PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key > '1' ) src1 diff --git a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out index a6b5d57..8d97034 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key <> '302' ) src1 @@ -12,9 +10,7 @@ JOIN ON src1.c2 = src3.c6 WHERE src1.c1 <> '311' and (src1.c2 <> 'val_50' or src1.c1 > '1') and (src2.c3 <> '10' or src1.c1 <> '10') and (src2.c3 <> '14') and (sqrt(src3.c5) <> 13) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key <> '302' ) src1 diff --git a/ql/src/test/results/clientpositive/spark/ppd_join3.q.out b/ql/src/test/results/clientpositive/spark/ppd_join3.q.out index 6567587..99ded53 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_join3.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_join3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key <> '11' ) src1 @@ -12,9 +10,7 @@ JOIN ON src1.c1 = src3.c5 WHERE src1.c1 > '0' and (src1.c2 <> 'val_500' or src1.c1 > '1') and (src2.c3 > '10' or 
src1.c1 <> '10') and (src2.c3 <> '4') and (src3.c5 <> '1') PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT src1.c1, src2.c4 FROM (SELECT src.key as c1, src.value as c2 from src where src.key <> '11' ) src1 diff --git a/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out b/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out index 930a881..12b1724 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_multi_insert.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE mi1(key INT, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE mi1(key INT, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@mi1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE mi1(key INT, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE mi1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@mi1 diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out index b2cbcf3..1fc8232 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join1.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src a LEFT OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src a LEFT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out index 9bb78d4..61dc19c 100644 --- 
a/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src a RIGHT OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN SELECT a.key, a.value, b.key, b.value WHERE a.key > '10' AND a.key < '20' AND b.key > '15' AND b.key < '25' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src a RIGHT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out index c69dd78..c47a424 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src a FULL OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN SELECT a.key, a.value, b.key, b.value WHERE a.key > '10' AND a.key < '20' AND b.key > '15' AND b.key < '25' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src a FULL OUTER JOIN diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out index 407f759..711c4c4 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM src a LEFT OUTER JOIN @@ -12,9 +10,7 @@ EXPLAIN SELECT a.key, a.value, b.key, b.value, c.key WHERE a.key > '10' AND a.key < '20' AND b.key > '15' AND b.key < '25' AND sqrt(c.key) <> 13 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src a LEFT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/spark/ppd_transform.q.out 
b/ql/src/test/results/clientpositive/spark/ppd_transform.q.out index b93a841..b0753b6 100644 --- a/ql/src/test/results/clientpositive/spark/ppd_transform.q.out +++ b/ql/src/test/results/clientpositive/spark/ppd_transform.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM src SELECT TRANSFORM(src.key, src.value) @@ -9,9 +7,7 @@ FROM ( ) tmap SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM src SELECT TRANSFORM(src.key, src.value) @@ -362,17 +358,13 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- test described in HIVE-4598 - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM ( SELECT * FROM src ) mapout REDUCE * USING 'cat' AS x,y ) reduced #### A masked pattern was here #### PREHOOK: type: QUERY -POSTHOOK: query: -- test described in HIVE-4598 - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM ( SELECT * FROM src ) mapout REDUCE * USING 'cat' AS x,y ) reduced diff --git a/ql/src/test/results/clientpositive/spark/ptf.q.out b/ql/src/test/results/clientpositive/spark/ptf.q.out index fd3533c..ff5d18f 100644 --- a/ql/src/test/results/clientpositive/spark/ptf.q.out +++ b/ql/src/test/results/clientpositive/spark/ptf.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - ---1. test1 -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -11,10 +8,7 @@ from noop(on part order by p_name ) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - ---1. 
test1 -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -179,16 +173,14 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 2. testJoinWithNoop -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 2. testJoinWithNoop -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz from noop (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j @@ -366,15 +358,13 @@ Manufacturer#5 almond antique medium spring khaki 6 -25 Manufacturer#5 almond antique sky peru orange 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 -23 -PREHOOK: query: -- 3. testOnlyPTF -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size from noop(on part partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 3. testOnlyPTF -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size from noop(on part partition by p_mfgr @@ -480,8 +470,7 @@ Manufacturer#5 almond antique medium spring khaki 6 Manufacturer#5 almond antique sky peru orange 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- 4. 
testPTFAlias -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -491,8 +480,7 @@ from noop(on part order by p_name ) abc PREHOOK: type: QUERY -POSTHOOK: query: -- 4. testPTFAlias -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -657,8 +645,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 5. testPTFAndWhereWithWindowing -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -668,8 +655,7 @@ from noop(on part order by p_name ) PREHOOK: type: QUERY -POSTHOOK: query: -- 5. testPTFAndWhereWithWindowing -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -835,8 +821,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 23 -23 -PREHOOK: query: -- 6. testSWQAndPTFAndGBy -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -847,8 +832,7 @@ from noop(on part ) group by p_mfgr, p_name, p_size PREHOOK: type: QUERY -POSTHOOK: query: -- 6. 
testSWQAndPTFAndGBy -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1025,16 +1009,14 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 23 -23 -PREHOOK: query: -- 7. testJoin -explain +PREHOOK: query: explain select abc.* from noop(on part partition by p_mfgr order by p_name ) abc join part p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -POSTHOOK: query: -- 7. testJoin -explain +POSTHOOK: query: explain select abc.* from noop(on part partition by p_mfgr @@ -1174,16 +1156,14 @@ POSTHOOK: Input: default@part 85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull 86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully 90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl -PREHOOK: query: -- 8. testJoinRight -explain +PREHOOK: query: explain select abc.* from part p1 join noop(on part partition by p_mfgr order by p_name ) abc on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -POSTHOOK: query: -- 8. 
testJoinRight -explain +POSTHOOK: query: explain select abc.* from part p1 join noop(on part partition by p_mfgr @@ -1327,16 +1307,14 @@ POSTHOOK: Input: default@part 85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull 86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully 90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl -PREHOOK: query: -- 9. testNoopWithMap -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r from noopwithmap(on part partition by p_mfgr order by p_name, p_size desc) PREHOOK: type: QUERY -POSTHOOK: query: -- 9. testNoopWithMap -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r from noopwithmap(on part @@ -1494,8 +1472,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 Manufacturer#5 almond antique sky peru orange 2 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 -PREHOOK: query: -- 10. testNoopWithMapWithWindowing -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1504,8 +1481,7 @@ from noopwithmap(on part partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 10. 
testNoopWithMapWithWindowing -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1684,8 +1660,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 11. testHavingWithWindowingPTFNoGBY -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1694,8 +1669,7 @@ from noop(on part partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 11. testHavingWithWindowingPTFNoGBY -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1857,8 +1831,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 12. testFunctionChain -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1868,8 +1841,7 @@ partition by p_mfgr order by p_mfgr DESC, p_name ))) PREHOOK: type: QUERY -POSTHOOK: query: -- 12. 
testFunctionChain -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2092,8 +2064,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 13. testPTFAndWindowingInSubQ -explain +PREHOOK: query: explain select p_mfgr, p_name, sub1.cd, sub1.s1 from (select p_mfgr, p_name, @@ -2106,8 +2077,7 @@ order by p_name) window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) ) sub1 PREHOOK: type: QUERY -POSTHOOK: query: -- 13. testPTFAndWindowingInSubQ -explain +POSTHOOK: query: explain select p_mfgr, p_name, sub1.cd, sub1.s1 from (select p_mfgr, p_name, @@ -2273,8 +2243,7 @@ Manufacturer#5 almond antique medium spring khaki 2 6208.18 Manufacturer#5 almond antique sky peru orange 3 7672.66 Manufacturer#5 almond aquamarine dodger light gainsboro 4 5882.970000000001 Manufacturer#5 almond azure blanched chiffon midnight 5 4271.3099999999995 -PREHOOK: query: -- 14. testPTFJoinWithWindowingWithCount -explain +PREHOOK: query: explain select abc.p_mfgr, abc.p_name, rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, @@ -2286,8 +2255,7 @@ partition by p_mfgr order by p_name ) abc join part p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -POSTHOOK: query: -- 14. 
testPTFJoinWithWindowingWithCount -explain +POSTHOOK: query: explain select abc.p_mfgr, abc.p_name, rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, @@ -2506,15 +2474,13 @@ Manufacturer#5 almond antique medium spring khaki 2 2 2 1611.66 3401.35000000000 Manufacturer#5 almond antique sky peru orange 3 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 15. testDistinctInSelectWithPTF -explain +PREHOOK: query: explain select DISTINCT p_mfgr, p_name, p_size from noop(on part partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 15. testDistinctInSelectWithPTF -explain +POSTHOOK: query: explain select DISTINCT p_mfgr, p_name, p_size from noop(on part partition by p_mfgr @@ -2637,8 +2603,7 @@ Manufacturer#5 almond antique medium spring khaki 6 Manufacturer#5 almond antique sky peru orange 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- 16. testViewAsTableInputToPTF -create view IF NOT EXISTS mfgr_price_view as +PREHOOK: query: create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s from part @@ -2647,8 +2612,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part PREHOOK: Output: database:default PREHOOK: Output: default@mfgr_price_view -POSTHOOK: query: -- 16. testViewAsTableInputToPTF -create view IF NOT EXISTS mfgr_price_view as +POSTHOOK: query: create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s from part @@ -2814,8 +2778,7 @@ Manufacturer#4 Brand#42 2581.6800000000003 7337.620000000001 Manufacturer#5 Brand#51 1611.66 1611.66 Manufacturer#5 Brand#52 3254.17 4865.83 Manufacturer#5 Brand#53 2806.83 7672.66 -PREHOOK: query: -- 17. 
testMultipleInserts2SWQsWithPTF -CREATE TABLE part_4( +PREHOOK: query: CREATE TABLE part_4( p_mfgr STRING, p_name STRING, p_size INT, @@ -2825,8 +2788,7 @@ s DOUBLE) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_4 -POSTHOOK: query: -- 17. testMultipleInserts2SWQsWithPTF -CREATE TABLE part_4( +POSTHOOK: query: CREATE TABLE part_4( p_mfgr STRING, p_name STRING, p_size INT, @@ -3261,8 +3223,7 @@ Manufacturer#5 almond antique medium spring khaki 6 8 2 2 0.4 31 Manufacturer#5 almond antique sky peru orange 2 2 3 3 0.6 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 4 4 0.8 6 Manufacturer#5 almond azure blanched chiffon midnight 23 23 5 5 1.0 2 -PREHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -3279,8 +3240,7 @@ from noop(on partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -3531,8 +3491,7 @@ Manufacturer#5 almond antique medium spring khaki 1 1 6 6 Manufacturer#5 almond antique sky peru orange 1 1 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 -PREHOOK: query: -- 19. testMulti3OperatorsFunctionChain -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -3549,8 +3508,7 @@ from noop(on partition by p_mfgr order by p_mfgr ) PREHOOK: type: QUERY -POSTHOOK: query: -- 19. 
testMulti3OperatorsFunctionChain -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -3797,8 +3755,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 6 37 Manufacturer#5 almond antique sky peru orange 3 3 2 39 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 85 Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 108 -PREHOOK: query: -- 20. testMultiOperatorChainWithNoWindowing -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -3813,8 +3770,7 @@ from noop(on partition by p_mfgr order by p_mfgr)) PREHOOK: type: QUERY -POSTHOOK: query: -- 20. testMultiOperatorChainWithNoWindowing -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -4035,8 +3991,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 6 37 Manufacturer#5 almond antique sky peru orange 3 3 2 39 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 85 Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 108 -PREHOOK: query: -- 21. testMultiOperatorChainEndsWithNoopMap -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -4053,8 +4008,7 @@ from noopwithmap(on partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 21. 
testMultiOperatorChainEndsWithNoopMap -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -4318,8 +4272,7 @@ Manufacturer#5 almond antique medium spring khaki 1 1 6 6 Manufacturer#5 almond antique sky peru orange 1 1 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 -PREHOOK: query: -- 22. testMultiOperatorChainWithDiffPartitionForWindow1 -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, @@ -4335,8 +4288,7 @@ from noop(on order by p_mfgr )) PREHOOK: type: QUERY -POSTHOOK: query: -- 22. testMultiOperatorChainWithDiffPartitionForWindow1 -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, @@ -4577,8 +4529,7 @@ Manufacturer#5 almond antique medium spring khaki 1 1 6 6 6 Manufacturer#5 almond antique sky peru orange 1 1 2 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 46 Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 23 -PREHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2 -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -4592,8 +4543,7 @@ from noopwithmap(on order by p_mfgr, p_name) )) PREHOOK: type: QUERY -POSTHOOK: query: -- 23. 
testMultiOperatorChainWithDiffPartitionForWindow2 -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, diff --git a/ql/src/test/results/clientpositive/spark/ptf_decimal.q.out b/ql/src/test/results/clientpositive/spark/ptf_decimal.q.out index 8494e97..36a5af9 100644 --- a/ql/src/test/results/clientpositive/spark/ptf_decimal.q.out +++ b/ql/src/test/results/clientpositive/spark/ptf_decimal.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. aggregate functions with decimal type - -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lead(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c1, lag(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c2, first_value(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c3, @@ -11,11 +7,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. aggregate functions with decimal type - -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lead(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c1, lag(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c2, first_value(p_retailprice) over (partition by p_mfgr ORDER BY p_name) as c3, @@ -50,9 +42,7 @@ Manufacturer#5 1464.48 NULL 1018.1 1789.69 1464.48 Manufacturer#5 1611.66 1788.73 1789.69 1789.69 1611.66 Manufacturer#5 1788.73 1018.1 1611.66 1789.69 1788.73 Manufacturer#5 1789.69 1611.66 NULL 1789.69 1789.69 -PREHOOK: query: -- 2. 
ranking functions with decimal type - -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, row_number() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c1, rank() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c2, dense_rank() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c3, @@ -63,9 +53,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 2. ranking functions with decimal type - -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, row_number() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c1, rank() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c2, dense_rank() over (PARTITION BY p_mfgr ORDER BY p_retailprice) as c3, @@ -102,17 +90,13 @@ Manufacturer#5 1464.48 2 2 2 0.25 0.4 2 Manufacturer#5 1611.66 3 3 3 0.5 0.6 3 Manufacturer#5 1788.73 4 4 4 0.75 0.8 4 Manufacturer#5 1789.69 5 5 5 1.0 1.0 5 -PREHOOK: query: -- 3. order by decimal - -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice) over (partition by p_mfgr ORDER BY p_retailprice desc) as c1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 3. order by decimal - -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice) over (partition by p_mfgr ORDER BY p_retailprice desc) as c1 from part POSTHOOK: type: QUERY @@ -144,17 +128,13 @@ Manufacturer#5 1464.48 1611.66 Manufacturer#5 1611.66 1788.73 Manufacturer#5 1788.73 1789.69 Manufacturer#5 1789.69 NULL -PREHOOK: query: -- 4. partition by decimal - -select p_mfgr, p_retailprice, +PREHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice) over (partition by p_retailprice) as c1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 4. 
partition by decimal - -select p_mfgr, p_retailprice, +POSTHOOK: query: select p_mfgr, p_retailprice, lag(p_retailprice) over (partition by p_retailprice) as c1 from part POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/ptf_general_queries.q.out b/ql/src/test/results/clientpositive/spark/ptf_general_queries.q.out index a3d6194..8ab2c22 100644 --- a/ql/src/test/results/clientpositive/spark/ptf_general_queries.q.out +++ b/ql/src/test/results/clientpositive/spark/ptf_general_queries.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. testNoPTFNoWindowing -select p_mfgr, p_name, p_size +PREHOOK: query: select p_mfgr, p_name, p_size from part distribute by p_mfgr sort by p_name PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. testNoPTFNoWindowing -select p_mfgr, p_name, p_size +POSTHOOK: query: select p_mfgr, p_name, p_size from part distribute by p_mfgr sort by p_name @@ -44,8 +38,7 @@ Manufacturer#5 almond antique medium spring khaki 6 Manufacturer#5 almond antique sky peru orange 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- 2. testUDAFsNoWindowingNoPTFNoGBY -select p_mfgr,p_name, p_retailprice, +PREHOOK: query: select p_mfgr,p_name, p_retailprice, sum(p_retailprice) over(partition by p_mfgr order by p_name) as s, min(p_retailprice) over(partition by p_mfgr order by p_name) as mi, max(p_retailprice) over(partition by p_mfgr order by p_name) as ma, @@ -54,8 +47,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 2. 
testUDAFsNoWindowingNoPTFNoGBY -select p_mfgr,p_name, p_retailprice, +POSTHOOK: query: select p_mfgr,p_name, p_retailprice, sum(p_retailprice) over(partition by p_mfgr order by p_name) as s, min(p_retailprice) over(partition by p_mfgr order by p_name) as mi, max(p_retailprice) over(partition by p_mfgr order by p_name) as ma, @@ -90,13 +82,11 @@ Manufacturer#5 almond antique medium spring khaki 1611.66 3401.3500000000004 161 Manufacturer#5 almond antique sky peru orange 1788.73 5190.08 1611.66 1789.69 1730.0266666666666 Manufacturer#5 almond aquamarine dodger light gainsboro 1018.1 6208.18 1018.1 1789.69 1552.045 Manufacturer#5 almond azure blanched chiffon midnight 1464.48 7672.66 1018.1 1789.69 1534.532 -PREHOOK: query: -- 3. testConstExprInSelect -select 'tst1' as key, count(1) as value from part +PREHOOK: query: select 'tst1' as key, count(1) as value from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 3. testConstExprInSelect -select 'tst1' as key, count(1) as value from part +POSTHOOK: query: select 'tst1' as key, count(1) as value from part POSTHOOK: type: QUERY POSTHOOK: Input: default@part #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out b/ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out index 27be99f..45b59f1 100644 --- a/ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out +++ b/ql/src/test/results/clientpositive/spark/ptf_matchpath.q.out @@ -34,10 +34,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/flights_tiny.txt' OVER POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@flights_tiny -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. 
basic Matchpath test -explain +PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on flights_tiny @@ -48,10 +45,7 @@ from matchpath(on arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. basic Matchpath test -explain +POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on flights_tiny @@ -164,8 +158,7 @@ Chicago 897 2010 10 20 4 20 Chicago 897 2010 10 21 3 21 Chicago 897 2010 10 22 2 22 Washington 7291 2010 10 27 2 27 -PREHOOK: query: -- 2. Matchpath on 1 partition -explain +PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on flights_tiny @@ -176,8 +169,7 @@ from matchpath(on ) where fl_num = 1142 PREHOOK: type: QUERY -POSTHOOK: query: -- 2. Matchpath on 1 partition -explain +POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on flights_tiny @@ -282,8 +274,7 @@ Baltimore 1142 2010 10 21 5 21 Baltimore 1142 2010 10 22 4 22 Baltimore 1142 2010 10 25 3 25 Baltimore 1142 2010 10 26 2 26 -PREHOOK: query: -- 3. empty partition. -explain +PREHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on (select * from flights_tiny where fl_num = -1142) flights_tiny @@ -293,8 +284,7 @@ from matchpath(on arg4('origin_city_name, fl_num, year, month, day_of_month, size(tpath) as sz, tpath[0].day_of_month as tpath') ) PREHOOK: type: QUERY -POSTHOOK: query: -- 3. empty partition. 
-explain +POSTHOOK: query: explain select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpath(on (select * from flights_tiny where fl_num = -1142) flights_tiny diff --git a/ql/src/test/results/clientpositive/spark/ptf_rcfile.q.out b/ql/src/test/results/clientpositive/spark/ptf_rcfile.q.out index ba83797..a6721fe 100644 --- a/ql/src/test/results/clientpositive/spark/ptf_rcfile.q.out +++ b/ql/src/test/results/clientpositive/spark/ptf_rcfile.q.out @@ -38,10 +38,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part.rc' overwrite int POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@part_rc -PREHOOK: query: -- SORT_QUERY_RESULTS - --- testWindowingPTFWithPartRC -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 @@ -51,10 +48,7 @@ order by p_name) PREHOOK: type: QUERY PREHOOK: Input: default@part_rc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- testWindowingPTFWithPartRC -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 diff --git a/ql/src/test/results/clientpositive/spark/ptf_register_tblfn.q.out b/ql/src/test/results/clientpositive/spark/ptf_register_tblfn.q.out index 0edff7a..ad7c526 100644 --- a/ql/src/test/results/clientpositive/spark/ptf_register_tblfn.q.out +++ b/ql/src/test/results/clientpositive/spark/ptf_register_tblfn.q.out @@ -40,10 +40,7 @@ PREHOOK: Output: matchpathtest POSTHOOK: query: create temporary function matchpathtest as 
'org.apache.hadoop.hive.ql.udf.ptf.MatchPath$MatchPathResolver' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: matchpathtest -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. basic Matchpath test -select origin_city_name, fl_num, year, month, day_of_month, sz, tpath +PREHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpathtest(on flights_tiny distribute by fl_num @@ -55,10 +52,7 @@ from matchpathtest(on PREHOOK: type: QUERY PREHOOK: Input: default@flights_tiny #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. basic Matchpath test -select origin_city_name, fl_num, year, month, day_of_month, sz, tpath +POSTHOOK: query: select origin_city_name, fl_num, year, month, day_of_month, sz, tpath from matchpathtest(on flights_tiny distribute by fl_num diff --git a/ql/src/test/results/clientpositive/spark/ptf_seqfile.q.out b/ql/src/test/results/clientpositive/spark/ptf_seqfile.q.out index aa270e5..044638f 100644 --- a/ql/src/test/results/clientpositive/spark/ptf_seqfile.q.out +++ b/ql/src/test/results/clientpositive/spark/ptf_seqfile.q.out @@ -38,10 +38,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part.seq' overwrite in POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@part_seq -PREHOOK: query: -- SORT_QUERY_RESULTS - --- testWindowingPTFWithPartSeqFile -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 @@ -51,10 +48,7 @@ order by p_name) PREHOOK: type: QUERY PREHOOK: Input: default@part_seq #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- testWindowingPTFWithPartSeqFile -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() 
over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, sum(p_retailprice) over (partition by p_mfgr order by p_name rows between unbounded preceding and current row) as s1 diff --git a/ql/src/test/results/clientpositive/spark/ptf_streaming.q.out b/ql/src/test/results/clientpositive/spark/ptf_streaming.q.out index 8e47b11..40abc62 100644 --- a/ql/src/test/results/clientpositive/spark/ptf_streaming.q.out +++ b/ql/src/test/results/clientpositive/spark/ptf_streaming.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - ---1. test1 -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -11,10 +8,7 @@ from noopstreaming(on part order by p_name ) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - ---1. test1 -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -179,16 +173,14 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 2. testJoinWithNoop -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 2. 
testJoinWithNoop -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz from noopstreaming (on (select p1.* from part p1 join part p2 on p1.p_partkey = p2.p_partkey) j @@ -366,16 +358,14 @@ Manufacturer#5 almond antique medium spring khaki 6 -25 Manufacturer#5 almond antique sky peru orange 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 -23 -PREHOOK: query: -- 7. testJoin -explain +PREHOOK: query: explain select abc.* from noopstreaming(on part partition by p_mfgr order by p_name ) abc join part p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -POSTHOOK: query: -- 7. testJoin -explain +POSTHOOK: query: explain select abc.* from noopstreaming(on part partition by p_mfgr @@ -515,16 +505,14 @@ POSTHOOK: Input: default@part 85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull 86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully 90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl -PREHOOK: query: -- 9. testNoopWithMap -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r from noopwithmapstreaming(on part partition by p_mfgr order by p_name, p_size desc) PREHOOK: type: QUERY -POSTHOOK: query: -- 9. 
testNoopWithMap -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r from noopwithmapstreaming(on part @@ -682,8 +670,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 Manufacturer#5 almond antique sky peru orange 2 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 -PREHOOK: query: -- 10. testNoopWithMapWithWindowing -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -692,8 +679,7 @@ from noopwithmapstreaming(on part partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 10. testNoopWithMapWithWindowing -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -872,8 +858,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 12. testFunctionChain -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -883,8 +868,7 @@ partition by p_mfgr order by p_mfgr, p_name ))) PREHOOK: type: QUERY -POSTHOOK: query: -- 12. 
testFunctionChain -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1107,8 +1091,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 12.1 testFunctionChain -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1118,8 +1101,7 @@ partition by p_mfgr order by p_mfgr, p_name ))) PREHOOK: type: QUERY -POSTHOOK: query: -- 12.1 testFunctionChain -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1342,8 +1324,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 12.2 testFunctionChain -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1353,8 +1334,7 @@ partition by p_mfgr order by p_mfgr, p_name ))) PREHOOK: type: QUERY -POSTHOOK: query: -- 12.2 testFunctionChain -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1577,8 +1557,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 
Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 14. testPTFJoinWithWindowingWithCount -explain +PREHOOK: query: explain select abc.p_mfgr, abc.p_name, rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, @@ -1590,8 +1569,7 @@ partition by p_mfgr order by p_name ) abc join part p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -POSTHOOK: query: -- 14. testPTFJoinWithWindowingWithCount -explain +POSTHOOK: query: explain select abc.p_mfgr, abc.p_name, rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, @@ -1810,8 +1788,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 2 1611.66 3401.35000000000 Manufacturer#5 almond antique sky peru orange 3 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -1828,8 +1805,7 @@ from noopstreaming(on partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 18. 
testMulti2OperatorsFunctionChainWithMap -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -2080,8 +2056,7 @@ Manufacturer#5 almond antique medium spring khaki 1 1 6 6 Manufacturer#5 almond antique sky peru orange 1 1 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 -PREHOOK: query: -- 19. testMulti3OperatorsFunctionChain -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2098,8 +2073,7 @@ from noop(on partition by p_mfgr order by p_mfgr ) PREHOOK: type: QUERY -POSTHOOK: query: -- 19. testMulti3OperatorsFunctionChain -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2346,8 +2320,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 6 37 Manufacturer#5 almond antique sky peru orange 3 3 2 39 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 85 Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 108 -PREHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2 -explain +PREHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2361,8 +2334,7 @@ from noopwithmapstreaming(on order by p_mfgr, p_name) )) PREHOOK: type: QUERY -POSTHOOK: query: -- 23. 
testMultiOperatorChainWithDiffPartitionForWindow2 -explain +POSTHOOK: query: explain select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, diff --git a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out index 9629768..5e9f254 100644 --- a/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/router_join_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM src a RIGHT OUTER JOIN @@ -9,9 +7,7 @@ EXPLAIN EXTENDED SELECT a.key, a.value, b.key, b.value WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM src a RIGHT OUTER JOIN diff --git a/ql/src/test/results/clientpositive/spark/sample1.q.out b/ql/src/test/results/clientpositive/spark/sample1.q.out index 4bd5c8c..e3e3061 100644 --- a/ql/src/test/results/clientpositive/spark/sample1.q.out +++ b/ql/src/test/results/clientpositive/spark/sample1.q.out @@ -6,14 +6,12 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING, dt STRING, hr STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- no input pruning, no sample filter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s WHERE s.ds='2008-04-08' and s.hr='11' PREHOOK: type: QUERY -POSTHOOK: query: -- no input pruning, no sample filter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON rand()) s WHERE s.ds='2008-04-08' and s.hr='11' diff --git a/ql/src/test/results/clientpositive/spark/sample10.q.out 
b/ql/src/test/results/clientpositive/spark/sample10.q.out index 386f512..3e7a667 100644 --- a/ql/src/test/results/clientpositive/spark/sample10.q.out +++ b/ql/src/test/results/clientpositive/spark/sample10.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19) - -create table srcpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 4 buckets +PREHOOK: query: create table srcpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 4 buckets PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@srcpartbucket -POSTHOOK: query: -- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.17, 0.18, 0.19) - -create table srcpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 4 buckets +POSTHOOK: query: create table srcpartbucket (key string, value string) partitioned by (ds string, hr string) clustered by (key) into 4 buckets POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@srcpartbucket diff --git a/ql/src/test/results/clientpositive/spark/sample2.q.out b/ql/src/test/results/clientpositive/spark/sample2.q.out index fa230aa..efcf33e 100644 --- a/ql/src/test/results/clientpositive/spark/sample2.q.out +++ b/ql/src/test/results/clientpositive/spark/sample2.q.out @@ -6,15 +6,11 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- input pruning, no sample filter --- default table sample columns -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s PREHOOK: type: QUERY -POSTHOOK: query: -- input pruning, no sample filter --- default table sample columns -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 
SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2) s POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/sample3.q.out b/ql/src/test/results/clientpositive/spark/sample3.q.out index 35a4352..667d5c3 100644 --- a/ql/src/test/results/clientpositive/spark/sample3.q.out +++ b/ql/src/test/results/clientpositive/spark/sample3.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- no input pruning, sample filter -EXPLAIN +PREHOOK: query: EXPLAIN SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- no input pruning, sample filter -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT s.key FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 5 on key) s POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/sample4.q.out b/ql/src/test/results/clientpositive/spark/sample4.q.out index 0f2af55..c5ca6ef 100644 --- a/ql/src/test/results/clientpositive/spark/sample4.q.out +++ b/ql/src/test/results/clientpositive/spark/sample4.q.out @@ -6,15 +6,11 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- bucket column is the same as table sample --- No need for sample filter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s PREHOOK: type: QUERY -POSTHOOK: query: -- bucket column is the same as table sample --- No need for sample filter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 2 on key) s POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/sample6.q.out b/ql/src/test/results/clientpositive/spark/sample6.q.out index 1158732..6d13587 100644 --- a/ql/src/test/results/clientpositive/spark/sample6.q.out +++ 
b/ql/src/test/results/clientpositive/spark/sample6.q.out @@ -6,13 +6,11 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- both input pruning and sample filter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s PREHOOK: type: QUERY -POSTHOOK: query: -- both input pruning and sample filter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/sample7.q.out b/ql/src/test/results/clientpositive/spark/sample7.q.out index 22663ac..e38252b 100644 --- a/ql/src/test/results/clientpositive/spark/sample7.q.out +++ b/ql/src/test/results/clientpositive/spark/sample7.q.out @@ -6,14 +6,12 @@ POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 -PREHOOK: query: -- both input pruning and sample filter -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100 PREHOOK: type: QUERY -POSTHOOK: query: -- both input pruning and sample filter -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE dest1 SELECT s.* FROM srcbucket TABLESAMPLE (BUCKET 1 OUT OF 4 on key) s WHERE s.key > 100 diff --git a/ql/src/test/results/clientpositive/spark/sample8.q.out b/ql/src/test/results/clientpositive/spark/sample8.q.out index 59807de..b4b766c 100644 --- a/ql/src/test/results/clientpositive/spark/sample8.q.out +++ b/ql/src/test/results/clientpositive/spark/sample8.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- sampling with join and alias --- 
SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON key) s JOIN srcpart TABLESAMPLE (BUCKET 1 OUT OF 10 ON key) t WHERE t.key = s.key and t.value = s.value and s.ds='2008-04-08' and s.hr='11' PREHOOK: type: QUERY -POSTHOOK: query: -- sampling with join and alias --- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT s.* FROM srcpart TABLESAMPLE (BUCKET 1 OUT OF 1 ON key) s JOIN srcpart TABLESAMPLE (BUCKET 1 OUT OF 10 ON key) t diff --git a/ql/src/test/results/clientpositive/spark/script_env_var1.q.out b/ql/src/test/results/clientpositive/spark/script_env_var1.q.out index cd39eb8..c1181b2 100644 --- a/ql/src/test/results/clientpositive/spark/script_env_var1.q.out +++ b/ql/src/test/results/clientpositive/spark/script_env_var1.q.out @@ -1,14 +1,10 @@ -PREHOOK: query: -- Verifies that script operator ID environment variables have unique values --- in each instance of the script operator. -SELECT count(1) FROM +PREHOOK: query: SELECT count(1) FROM ( SELECT * FROM (SELECT TRANSFORM('echo $HIVE_SCRIPT_OPERATOR_ID') USING 'sh' AS key FROM src order by key LIMIT 1)x UNION ALL SELECT * FROM (SELECT TRANSFORM('echo $HIVE_SCRIPT_OPERATOR_ID') USING 'sh' AS key FROM src order by key LIMIT 1)y ) a GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Verifies that script operator ID environment variables have unique values --- in each instance of the script operator. 
-SELECT count(1) FROM +POSTHOOK: query: SELECT count(1) FROM ( SELECT * FROM (SELECT TRANSFORM('echo $HIVE_SCRIPT_OPERATOR_ID') USING 'sh' AS key FROM src order by key LIMIT 1)x UNION ALL SELECT * FROM (SELECT TRANSFORM('echo $HIVE_SCRIPT_OPERATOR_ID') USING 'sh' AS key FROM src order by key LIMIT 1)y ) a GROUP BY key POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/script_env_var2.q.out b/ql/src/test/results/clientpositive/spark/script_env_var2.q.out index c3bb990..58a0936 100644 --- a/ql/src/test/results/clientpositive/spark/script_env_var2.q.out +++ b/ql/src/test/results/clientpositive/spark/script_env_var2.q.out @@ -1,12 +1,10 @@ -PREHOOK: query: -- Same test as script_env_var1, but test setting the variable name -SELECT count(1) FROM +PREHOOK: query: SELECT count(1) FROM ( SELECT * FROM (SELECT TRANSFORM('echo $MY_ID') USING 'sh' AS key FROM src LIMIT 1)a UNION ALL SELECT * FROM (SELECT TRANSFORM('echo $MY_ID') USING 'sh' AS key FROM src LIMIT 1)b ) a GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Same test as script_env_var1, but test setting the variable name -SELECT count(1) FROM +POSTHOOK: query: SELECT count(1) FROM ( SELECT * FROM (SELECT TRANSFORM('echo $MY_ID') USING 'sh' AS key FROM src LIMIT 1)a UNION ALL SELECT * FROM (SELECT TRANSFORM('echo $MY_ID') USING 'sh' AS key FROM src LIMIT 1)b ) a GROUP BY key POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/script_pipe.q.out b/ql/src/test/results/clientpositive/spark/script_pipe.q.out index d815f0a..df23114 100644 --- a/ql/src/test/results/clientpositive/spark/script_pipe.q.out +++ b/ql/src/test/results/clientpositive/spark/script_pipe.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- Tests exception in ScriptOperator.close() by passing to the operator a small amount of data -EXPLAIN SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp +PREHOOK: query: EXPLAIN 
SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp PREHOOK: type: QUERY -POSTHOOK: query: -- Tests exception in ScriptOperator.close() by passing to the operator a small amount of data -EXPLAIN SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp +POSTHOOK: query: EXPLAIN SELECT TRANSFORM(*) USING 'true' AS a, b, c FROM (SELECT * FROM src LIMIT 1) tmp POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage @@ -62,11 +60,9 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Tests exception in ScriptOperator.processOp() by passing extra data needed to fill pipe buffer -EXPLAIN SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value) USING 'head -n 1' as a,b,c,d FROM src +PREHOOK: query: EXPLAIN SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value) USING 'head -n 1' as a,b,c,d FROM src PREHOOK: type: QUERY -POSTHOOK: query: -- Tests exception in ScriptOperator.processOp() by passing extra data needed to fill pipe buffer -EXPLAIN SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value) USING 'head -n 1' as a,b,c,d FROM src +POSTHOOK: query: EXPLAIN SELECT TRANSFORM(key, value, key, value, key, value, key, value, key, value, key, value) USING 'head -n 1' as a,b,c,d FROM src POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/scriptfile1.q.out b/ql/src/test/results/clientpositive/spark/scriptfile1.q.out index bf202f9..cf718cc 100644 --- a/ql/src/test/results/clientpositive/spark/scriptfile1.q.out +++ b/ql/src/test/results/clientpositive/spark/scriptfile1.q.out @@ -1,20 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- EXCLUDE_OS_WINDOWS - --- NO_SESSION_REUSE - -CREATE TABLE dest1(key INT, value STRING) +PREHOOK: query: CREATE TABLE dest1(key INT, value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default 
PREHOOK: Output: default@dest1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- EXCLUDE_OS_WINDOWS - --- NO_SESSION_REUSE - -CREATE TABLE dest1(key INT, value STRING) +POSTHOOK: query: CREATE TABLE dest1(key INT, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/spark/semijoin.q.out b/ql/src/test/results/clientpositive/spark/semijoin.q.out index 085257e..a806383 100644 --- a/ql/src/test/results/clientpositive/spark/semijoin.q.out +++ b/ql/src/test/results/clientpositive/spark/semijoin.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table t1 as select cast(key as int) key, value from src where key <= 10 +PREHOOK: query: create table t1 as select cast(key as int) key, value from src where key <= 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table t1 as select cast(key as int) key, value from src where key <= 10 +POSTHOOK: query: create table t1 as select cast(key as int) key, value from src where key <= 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/spark/skewjoin.q.out b/ql/src/test/results/clientpositive/spark/skewjoin.q.out index 727487f..dade580 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoin.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoin.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE T1(key STRING, val 
STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_1.q.out b/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_1.q.out index 064acd7..be71458 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_1.q.out @@ -1,25 +1,9 @@ -PREHOOK: query: -- This is to test the union->selectstar->filesink and skewjoin optimization --- Union of 2 map-reduce subqueries is performed for the skew join --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output, it might be easier to run the test --- only on hadoop 23 - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- This is to test the union->selectstar->filesink and skewjoin optimization --- Union of 2 map-reduce subqueries is performed for the skew join --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output, it might be easier to run the test --- only on hadoop 23 - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) SKEWED BY (key) ON ((2)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -50,14 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple join query with skew on both the tables on the join key - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- a simple join query with skew on both the tables on the join key - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -197,14 +177,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_2.q.out b/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_2.q.out index c9f35d5..11da0dc 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoin_union_remove_2.q.out @@ -50,26 +50,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This is to test the union->selectstar->filesink and skewjoin 
optimization --- Union of 3 map-reduce subqueries is performed for the skew join --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table, it might be easier --- to run the test only on hadoop 23 - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- This is to test the union->selectstar->filesink and skewjoin optimization --- Union of 3 map-reduce subqueries is performed for the skew join --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table, it might be easier --- to run the test only on hadoop 23 - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt1.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt1.q.out index 4558c1d..7cd826a 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt1.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt1.q.out @@ -34,16 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple join query with skew on both the tables on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- a simple join query with skew on both 
the tables on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -183,14 +177,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -332,14 +322,10 @@ NULL NULL 5 15 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out index 527079b..3c8a254 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out @@ -36,16 +36,10 @@ POSTHOOK: Input: default@t1 POSTHOOK: Output: default@array_valued_t1 POSTHOOK: Lineage: array_valued_t1.key SIMPLE [(t1)t1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: array_valued_t1.value EXPRESSION [(t1)t1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- This test is to verify the skew join compile optimization when the join is followed by a lateral view --- adding a order by at the end to make the results deterministic - -explain +PREHOOK: query: explain select * from (select a.key as key, b.value as 
array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val PREHOOK: type: QUERY -POSTHOOK: query: -- This test is to verify the skew join compile optimization when the join is followed by a lateral view --- adding a order by at the end to make the results deterministic - -explain +POSTHOOK: query: explain select * from (select a.key as key, b.value as array_val from T1 a join array_valued_T1 b on a.key=b.key) i lateral view explode (array_val) c as val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt11.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt11.q.out index 4028828..cb69cd1 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt11.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt11.q.out @@ -32,12 +32,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- This test is to verify the skew join compile optimization when the join is followed --- by a union. Both sides of a union consist of a join, which should have used --- skew join compile time optimization. --- adding an order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key @@ -45,12 +40,7 @@ select * from select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- This test is to verify the skew join compile optimization when the join is followed --- by a union. Both sides of a union consist of a join, which should have used --- skew join compile time optimization. 
--- adding an order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from ( select a.key, a.val as val1, b.val as val2 from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out index 921c4ba..6a716fb 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt12.q.out @@ -34,18 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. The join key matches the skewed key set. --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. The join key matches the skewed key set. 
--- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt13.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt13.q.out index e3db707..ff43702 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt13.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt13.q.out @@ -48,27 +48,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Table 3 is skewed, but since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key join T3 c on a.val = c.val PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. 
Table 3 is skewed, but since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out index ffba6ee..9b6e5ff 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt14.q.out @@ -50,29 +50,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Tables 1 and 3 are skewed. Since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not enabled for table 3, but it is used for the first join between --- tables 1 and 2 --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key join T3 c on a.val = c.val PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for skewed join compile time optimization for more than 2 tables. --- The join key for table 3 is different from the join key used for joining --- tables 1 and 2. Tables 1 and 3 are skewed. 
Since one of the join sources for table --- 3 consist of a sub-query which contains a join, the compile time skew join --- optimization is not enabled for table 3, but it is used for the first join between --- tables 1 and 2 --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN select * from T1 a join T2 b on a.key = b.key diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt15.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt15.q.out index 3851107..28a2e00 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt15.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt15.q.out @@ -14,13 +14,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmp POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tmpt1 -PREHOOK: query: -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +PREHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +POSTHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 @@ -68,22 +66,10 @@ POSTHOOK: Input: default@tmpt2 POSTHOOK: Output: default@t2 POSTHOOK: Lineage: t2.key EXPRESSION [(tmpt2)tmpt2.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t2.val SIMPLE [(tmpt2)tmpt2.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- The skewed key is a integer column. 
--- Otherwise this test is similar to skewjoinopt1.q --- Both the joined tables are skewed, and the joined column --- is an integer --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The skewed key is a integer column. --- Otherwise this test is similar to skewjoinopt1.q --- Both the joined tables are skewed, and the joined column --- is an integer --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -223,14 +209,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a RIGHT OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -372,14 +354,10 @@ NULL NULL 5 15 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- an aggregation at the end should not change anything - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out index 62540cc..484dbf7 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt16.q.out @@ -34,18 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: 
type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the both the columns --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the both the columns --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out index ca33d86..2ca749a 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt17.q.out @@ -34,22 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the first skewed column --- The skewed value for the jon key is common to both the tables. --- In this case, the skewed join value is not repeated in the filter. --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the first skewed column --- The skewed value for the jon key is common to both the tables. 
--- In this case, the skewed join value is not repeated in the filter. --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -241,18 +229,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the both the columns --- In this case, the skewed join value is repeated in the filter. - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the both the columns --- In this case, the skewed join value is repeated in the filter. 
- -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt18.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt18.q.out index c455ede..7d459e8 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt18.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt18.q.out @@ -14,13 +14,11 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T1.txt' INTO TABLE tmp POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tmpt1 -PREHOOK: query: -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +PREHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- testing skew on other data types - int -CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) +POSTHOOK: query: CREATE TABLE T1(key INT, val STRING) SKEWED BY (key) ON ((2)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 @@ -34,18 +32,12 @@ POSTHOOK: Input: default@tmpt1 POSTHOOK: Output: default@t1 POSTHOOK: Lineage: t1.key EXPRESSION [(tmpt1)tmpt1.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: t1.val SIMPLE [(tmpt1)tmpt1.FieldSchema(name:val, type:string, comment:null), ] -PREHOOK: query: -- Tke skewed column is same in both the tables, however it is --- INT in one of the tables, and STRING in the other table - -CREATE TABLE T2(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T2(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T2 -POSTHOOK: query: -- Tke skewed column is same in both the tables, however it is --- INT in one of the tables, and 
STRING in the other table - -CREATE TABLE T2(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T2(key STRING, val STRING) SKEWED BY (key) ON ((3)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -58,20 +50,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- Once HIVE-3445 is fixed, the compile time skew join optimization would be --- applicable here. Till the above jira is fixed, it would be performed as a --- regular join --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Once HIVE-3445 is fixed, the compile time skew join optimization would be --- applicable here. Till the above jira is fixed, it would be performed as a --- regular join --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt19.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt19.q.out index 5da0979..9606b8c 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt19.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt19.q.out @@ -34,20 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- add a test where the skewed key is also the bucketized key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: 
type: QUERY -POSTHOOK: query: -- add a test where the skewed key is also the bucketized key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out index 85e0a6c..3646478 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt2.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) SKEWED BY (key) ON ((2), (7)) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) SKEWED BY (key) ON ((2), (7)) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -38,22 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple query with skew on both the tables on the join key --- multiple skew values are present for the skewed keys --- but the skewed values do not overlap. --- The join values are a superset of the skewed keys. 
--- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- a simple query with skew on both the tables on the join key --- multiple skew values are present for the skewed keys --- but the skewed values do not overlap. --- The join values are a superset of the skewed keys. --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -186,14 +170,10 @@ POSTHOOK: Input: default@t2 3 13 3 13 8 18 8 18 8 18 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a LEFT OUTER JOIN T2 b ON a.key = b.key and a.val = b.val POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -330,14 +310,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 18 8 18 8 28 NULL NULL -PREHOOK: query: -- a group by at the end should not change anything - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key PREHOOK: type: QUERY -POSTHOOK: query: -- a group by at the end should not change anything - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.key, count(1) FROM T1 a JOIN T2 b ON a.key = b.key and a.val = b.val group by a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt20.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt20.q.out index 3369d92..0b38eff 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt20.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt20.q.out @@ -34,20 +34,10 @@ POSTHOOK: query: 
LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- add a test where the skewed key is also the bucketized/sorted key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- add a test where the skewed key is also the bucketized/sorted key --- it should not matter, and the compile time skewed join --- optimization is performed --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt3.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt3.q.out index e8f6e8e..b01f348 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt3.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt3.q.out @@ -34,20 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- a simple query with skew on both the tables. One of the skewed --- value is common to both the tables. The skewed value should not be --- repeated in the filter. --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- a simple query with skew on both the tables. One of the skewed --- value is common to both the tables. The skewed value should not be --- repeated in the filter. 
--- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -187,14 +177,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- test outer joins also - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- test outer joins also - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a FULL OUTER JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt4.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt4.q.out index df07256..608ca48 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt4.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt4.q.out @@ -32,18 +32,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- only of the tables of the join (the left table of the join) is skewed --- the skewed filter would still be applied to both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- only of the tables of the join (the left table of the join) is skewed --- the skewed filter would still be applied to both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -183,12 +175,10 @@ POSTHOOK: Input: default@t2 8 18 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- the order of the join should not matter, just confirming -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, 
b.* FROM T2 a JOIN T1 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- the order of the join should not matter, just confirming -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T2 a JOIN T1 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt5.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt5.q.out index 2f169f4..3f2a939 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt5.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt5.q.out @@ -34,18 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. Ths join is performed on the first skewed column --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- One of the tables is skewed by 2 columns, and the other table is --- skewed by one column. 
Ths join is performed on the first skewed column --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt6.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt6.q.out index 5ce0b72..c00ab12 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt6.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt6.q.out @@ -34,20 +34,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. The join key is a subset of the skewed key set: --- it only contains the first skewed key for both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the join tables are skewed by 2 keys, and one of the skewed values --- is common to both the tables. 
The join key is a subset of the skewed key set: --- it only contains the first skewed key for both the tables --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.* FROM T1 a JOIN T2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt7.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt7.q.out index a675f4c..11da0dc 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt7.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt7.q.out @@ -50,20 +50,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. --- 2 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. 
--- 2 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt8.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt8.q.out index 2425b61..38f2b1c 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt8.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt8.q.out @@ -48,20 +48,10 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. --- 1 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key PREHOOK: type: QUERY -POSTHOOK: query: -- This test is for validating skewed join compile time optimization for more than --- 2 tables. The join key is the same, and so a 3-way join would be performed. 
--- 1 of the 3 tables are skewed on the join key --- adding a order by at the end to make the results deterministic - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.*, b.*, c.* FROM T1 a JOIN T2 b ON a.key = b.key JOIN T3 c on a.key = c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/skewjoinopt9.q.out b/ql/src/test/results/clientpositive/spark/skewjoinopt9.q.out index bfc0fb3..ece2e1c 100644 --- a/ql/src/test/results/clientpositive/spark/skewjoinopt9.q.out +++ b/ql/src/test/results/clientpositive/spark/skewjoinopt9.q.out @@ -32,10 +32,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T2.txt' INTO TABLE T2 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t2 -PREHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a union all --- adding a order by at the end to make the results deterministic -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select key, val from T1 @@ -44,10 +41,7 @@ select key, val from T1 ) subq1 join T2 b on subq1.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a union all --- adding a order by at the end to make the results deterministic -EXPLAIN +POSTHOOK: query: EXPLAIN select * from ( select key, val from T1 @@ -163,18 +157,14 @@ POSTHOOK: Input: default@t2 8 28 8 18 8 28 8 18 8 28 8 18 -PREHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a group by -EXPLAIN +PREHOOK: query: EXPLAIN select * from ( select key, count(1) as cnt from T1 group by key ) subq1 join T2 b on subq1.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- no skew join compile time optimization would be performed if one of the --- join sources is a sub-query consisting of a group by -EXPLAIN 
+POSTHOOK: query: EXPLAIN select * from ( select key, count(1) as cnt from T1 group by key diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out index 5365420..5de1ea4 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_1.q.out @@ -46,14 +46,10 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out index 098cc59..29e98e9 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_10.q.out @@ -20,15 +20,11 @@ POSTHOOK: query: alter table tmp_smb_bucket_10 add partition (ds = '2') POSTHOOK: type: ALTERTABLE_ADDPARTS POSTHOOK: Output: default@tmp_smb_bucket_10 POSTHOOK: Output: default@tmp_smb_bucket_10@ds=2 -PREHOOK: query: -- add dummy files to make sure that the number of files in each partition is same as number of buckets - -load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1') +PREHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1') PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tmp_smb_bucket_10@ds=1 -POSTHOOK: query: -- add dummy files to make sure 
that the number of files in each partition is same as number of buckets - -load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1') +POSTHOOK: query: load data local inpath '../../data/files/smbbucket_1.rc' INTO TABLE tmp_smb_bucket_10 partition(ds='1') POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tmp_smb_bucket_10@ds=1 diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out index bb2eac9..c5bc674 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_11.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- This test verifies that the output of a sort merge join on 2 partitions (one on each side of the join) is bucketed - --- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- This test verifies that the output of a sort merge join on 2 partitions (one on each side of the join) is bucketed - --- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -38,22 +32,18 @@ POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSch POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table3 -POSTHOOK: query: -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds = '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -2085,16 +2075,14 @@ POSTHOOK: Input: default@test_table3@ds=1 449 val_449 1 481 val_481 1 497 val_497 1 -PREHOOK: 
query: -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +PREHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY PREHOOK: Input: default@test_table1 PREHOOK: Input: default@test_table1@ds=1 PREHOOK: Input: default@test_table3 PREHOOK: Input: default@test_table3@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +POSTHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table1 POSTHOOK: Input: default@test_table1@ds=1 diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out index d186987..d85d0b9 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_12.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- This test verifies that the output of a sort merge join on 1 big partition with multiple small partitions is bucketed and sorted - --- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- This test verifies that the output of a sort merge join on 1 big partition with multiple small partitions is bucketed and sorted - --- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -50,22 +44,18 @@ POSTHOOK: Lineage: test_table2 PARTITION(ds=2).key EXPRESSION [(src)src.FieldSch POSTHOOK: Lineage: test_table2 PARTITION(ds=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=3).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2 PARTITION(ds=3).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table3 -POSTHOOK: query: -- Create a bucketed table -CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table3 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by 
joining the two bucketed and sorted tables, bucketing is not enforced -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by joining the two bucketed and sorted tables, bucketing is not enforced -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT /*+ MAPJOIN(b) */ a.key, b.value FROM test_table1 a JOIN test_table2 b ON a.key = b.key AND a.ds = '1' AND b.ds >= '1' POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -257,16 +247,14 @@ POSTHOOK: Input: default@test_table2@ds=3 POSTHOOK: Output: default@test_table3@ds=1 POSTHOOK: Lineage: test_table3 PARTITION(ds=1).key SIMPLE [(test_table1)a.FieldSchema(name:key, type:int, comment:null), ] POSTHOOK: Lineage: test_table3 PARTITION(ds=1).value SIMPLE [(test_table2)b.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +PREHOOK: query: SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY PREHOOK: Input: default@test_table1 PREHOOK: Input: default@test_table1@ds=1 PREHOOK: Input: default@test_table3 PREHOOK: Input: default@test_table3@ds=1 #### A masked pattern was here #### -POSTHOOK: query: -- Join data from a sampled bucket to verify the data is bucketed -SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' +POSTHOOK: query: 
SELECT COUNT(*) FROM test_table3 TABLESAMPLE(BUCKET 2 OUT OF 16) a JOIN test_table1 TABLESAMPLE(BUCKET 2 OUT OF 16) b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: QUERY POSTHOOK: Input: default@test_table1 POSTHOOK: Input: default@test_table1@ds=1 @@ -274,13 +262,11 @@ POSTHOOK: Input: default@test_table3 POSTHOOK: Input: default@test_table3@ds=1 #### A masked pattern was here #### 879 -PREHOOK: query: -- Join data from the sampled buckets of 2 tables to verify the data is bucketed and sorted -explain extended +PREHOOK: query: explain extended INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' PREHOOK: type: QUERY -POSTHOOK: query: -- Join data from the sampled buckets of 2 tables to verify the data is bucketed and sorted -explain extended +POSTHOOK: query: explain extended INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '2') SELECT /*+mapjoin(b)*/ a.key, concat(a.value, b.value) FROM test_table3 a JOIN test_table1 b ON a.key = b.key AND a.ds = '1' AND b.ds='1' POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out index 7e6f225..dc2bfe7 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_13.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- This test verifies that the sort merge join optimizer works when the tables are joined on columns with different names - --- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: 
query: -- This test verifies that the sort merge join optimizer works when the tables are joined on columns with different names - --- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -66,14 +60,10 @@ POSTHOOK: Lineage: test_table3.key EXPRESSION [(src)src.FieldSchema(name:key, ty POSTHOOK: Lineage: test_table3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table4.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Join data from 2 tables on their respective sorted columns (one each, with different names) and --- verify sort merge join is used -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Join data from 2 tables on their respective sorted columns (one each, with different names) and --- verify sort merge join is used -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.value ORDER BY a.key LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -233,14 +223,10 @@ POSTHOOK: Input: default@test_table2 0 val_0 0 val_0 0 val_0 0 val_0 2 val_2 2 val_2 -PREHOOK: query: -- Join data from 2 tables on their respective columns (two each, with the same names but sorted --- with different priorities) and verify sort merge join is not used -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table3 
a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- Join data from 2 tables on their respective columns (two each, with the same names but sorted --- with different priorities) and verify sort merge join is not used -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table3 a JOIN test_table4 b ON a.key = b.value ORDER BY a.key LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out index c13bb4f..68cd0fb 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_14.q.out @@ -38,14 +38,12 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@tbl2 POSTHOOK: Lineage: tbl2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tbl2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 PREHOOK: type: QUERY -POSTHOOK: query: -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key ) subq1 @@ -121,9 +119,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join --- Add a order by at the end to make the results deterministic. 
-explain +PREHOOK: query: explain select key, count(*) from ( select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key @@ -131,9 +127,7 @@ select key, count(*) from group by key order by key PREHOOK: type: QUERY -POSTHOOK: query: -- The mapjoin is being performed as part of sub-query. It should be converted to a sort-merge join --- Add a order by at the end to make the results deterministic. -explain +POSTHOOK: query: explain select key, count(*) from ( select /*+mapjoin(a)*/ a.key as key, a.value as val1, b.value as val2 from tbl1 a join tbl2 b on a.key = b.key @@ -240,8 +234,7 @@ POSTHOOK: Input: default@tbl2 5 9 8 1 9 1 -PREHOOK: query: -- The mapjoin is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select key, count(*) from @@ -251,8 +244,7 @@ select count(*) from group by key ) subq2 PREHOOK: type: QUERY -POSTHOOK: query: -- The mapjoin is being performed as part of more than one sub-query. It should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select key, count(*) from @@ -367,18 +359,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 6 -PREHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. -explain +PREHOOK: query: explain select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join (select a.key as key, a.value as value from tbl2 a where key < 6) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join. 
-explain +POSTHOOK: query: explain select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -464,9 +452,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +PREHOOK: query: explain select /*+mapjoin(subq2)*/ count(*) from ( select * from @@ -478,9 +464,7 @@ select /*+mapjoin(subq2)*/ count(*) from join tbl2 b on subq2.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters, it should --- be converted to a sort-merge join, although there is more than one level of sub-query -explain +POSTHOOK: query: explain select /*+mapjoin(subq2)*/ count(*) from ( select * from @@ -577,9 +561,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Both the big table and the small table are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select /*+mapjoin(subq2)*/ count(*) from ( select * from @@ -598,9 +580,7 @@ select /*+mapjoin(subq2)*/ count(*) from ) subq4 on subq2.key = subq4.key PREHOOK: type: QUERY -POSTHOOK: query: -- Both the big table and the small table are nested sub-queries i.e more then 1 level of sub-query. --- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select /*+mapjoin(subq2)*/ count(*) from ( select * from @@ -720,20 +700,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl1 #### A masked pattern was here #### 20 -PREHOOK: query: -- The subquery itself is being map-joined. 
Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +PREHOOK: query: explain select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join (select a.key as key, concat(a.value, a.value) as value from tbl2 a where key < 8) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- The subquery itself is being map-joined. Since the sub-query only contains selects and filters and the join key --- is not getting modified, it should be converted to a sort-merge join. Note that the sub-query modifies one --- item, but that is not part of the join key. -explain +POSTHOOK: query: explain select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, concat(a.value, a.value) as value from tbl1 a where key < 8) subq1 join @@ -819,18 +793,14 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed -explain +PREHOOK: query: explain select /*+mapjoin(subq1)*/ count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join (select a.key +1 as key, concat(a.value, a.value) as value from tbl2 a) subq2 on subq1.key = subq2.key PREHOOK: type: QUERY -POSTHOOK: query: -- Since the join key is modified by the sub-query, neither sort-merge join not bucketized map-side --- join should be performed -explain +POSTHOOK: query: explain select /*+mapjoin(subq1)*/ count(*) from (select a.key +1 as key, concat(a.value, a.value) as value from tbl1 a) subq1 join @@ -944,16 +914,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 22 -PREHOOK: query: -- The 
small table is a sub-query and the big table is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- The small table is a sub-query and the big table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select /*+mapjoin(subq1)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -1029,16 +995,12 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- The big table is a sub-query and the small table is not. --- It should be converted to a sort-merge join. -explain +PREHOOK: query: explain select /*+mapjoin(a)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key PREHOOK: type: QUERY -POSTHOOK: query: -- The big table is a sub-query and the small table is not. --- It should be converted to a sort-merge join. -explain +POSTHOOK: query: explain select /*+mapjoin(a)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join tbl2 a on subq1.key = a.key @@ -1118,9 +1080,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 20 -PREHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. 
--- It should be converted to to a sort-merge join -explain +PREHOOK: query: explain select /*+mapjoin(subq1, subq2)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1130,9 +1090,7 @@ select /*+mapjoin(subq1, subq2)*/ count(*) from (select a.key as key, a.value as value from tbl2 a where key < 6) subq3 on (subq1.key = subq3.key) PREHOOK: type: QUERY -POSTHOOK: query: -- There are more than 2 inputs to the join, all of them being sub-queries. --- It should be converted to to a sort-merge join -explain +POSTHOOK: query: explain select /*+mapjoin(subq1, subq2)*/ count(*) from (select a.key as key, a.value as value from tbl1 a where key < 6) subq1 join @@ -1229,9 +1187,7 @@ POSTHOOK: Input: default@tbl1 POSTHOOK: Input: default@tbl2 #### A masked pattern was here #### 56 -PREHOOK: query: -- The mapjoin is being performed on a nested sub-query, and an aggregation is performed after that. --- The join should be converted to a sort-merge join -explain +PREHOOK: query: explain select count(*) from ( select /*+mapjoin(subq2)*/ subq2.key as key, subq2.value as value1, b.value as value2 from ( @@ -1244,9 +1200,7 @@ select count(*) from ( join tbl2 b on subq2.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- The mapjoin is being performed on a nested sub-query, and an aggregation is performed after that. 
--- The join should be converted to a sort-merge join -explain +POSTHOOK: query: explain select count(*) from ( select /*+mapjoin(subq2)*/ subq2.key as key, subq2.value as value1, b.value as value2 from ( diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out index 4152280..41ba4d6 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_15.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- This test verifies that the sort merge join optimizer works when the tables are sorted on columns which is a superset --- of join columns - --- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- This test verifies that the sort merge join optimizer works when the tables are sorted on columns which is a superset --- of join columns - --- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, value ASC) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -40,12 +32,10 @@ POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, ty POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2.value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- it should be converted to a sort-merge join, since the first sort column (#join columns = 1) contains the join columns -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key ORDER BY a.key LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- it should be converted to a sort-merge join, since the first sort column (#join columns = 1) contains the join columns -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key ORDER BY a.key LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -221,13 +211,11 @@ POSTHOOK: query: DROP TABLE test_table2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@test_table2 POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, key2 INT, value STRING) CLUSTERED BY (key) SORTED BY (key ASC, key2 ASC, value ASC) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -259,12 +247,10 @@ POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, ty POSTHOOK: Lineage: test_table2.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: 
Lineage: test_table2.key2 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.key2 = b.key2 ORDER BY a.key LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -424,14 +410,10 @@ POSTHOOK: Input: default@test_table2 0 0 val_0 0 0 val_0 0 0 val_0 0 0 val_0 2 2 val_2 2 2 val_2 -PREHOOK: query: -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns --- even if the order is not the same -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- it should be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) contain the join columns --- even if the order is not the same -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key2 = b.key2 and a.key = b.key ORDER BY a.key LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -591,14 +573,10 @@ POSTHOOK: Input: default@test_table2 0 0 val_0 0 0 val_0 0 0 val_0 0 0 val_0 2 2 val_2 2 2 val_2 -PREHOOK: query: -- it should not be converted to a sort-merge join, since 
the first 2 sort columns (#join columns = 2) do not contain all --- the join columns -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10 PREHOOK: type: QUERY -POSTHOOK: query: -- it should not be converted to a sort-merge join, since the first 2 sort columns (#join columns = 2) do not contain all --- the join columns -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT /*+mapjoin(b)*/ * FROM test_table1 a JOIN test_table2 b ON a.key = b.key and a.value = b.value ORDER BY a.key LIMIT 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out index 5652170..c845a72 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_16.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -34,12 +32,10 @@ POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, ty POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: test_table2.key EXPRESSION 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Mapjoin followed by a aggregation should be performed in a single MR job -EXPLAIN +PREHOOK: query: EXPLAIN SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- Mapjoin followed by a aggregation should be performed in a single MR job -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+mapjoin(b)*/ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_17.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_17.q.out index 2a0c35c..25e29f6 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_17.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_17.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table1 @@ -160,8 +158,7 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table8 POSTHOOK: Lineage: test_table8.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table8.value SIMPLE 
[(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Mapjoin followed by a aggregation should be performed in a single MR job upto 7 tables -EXPLAIN +PREHOOK: query: EXPLAIN SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key JOIN test_table3 c ON a.key = c.key @@ -170,8 +167,7 @@ JOIN test_table5 e ON a.key = e.key JOIN test_table6 f ON a.key = f.key JOIN test_table7 g ON a.key = g.key PREHOOK: type: QUERY -POSTHOOK: query: -- Mapjoin followed by a aggregation should be performed in a single MR job upto 7 tables -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT /*+ mapjoin(b, c, d, e, f, g) */ count(*) FROM test_table1 a JOIN test_table2 b ON a.key = b.key JOIN test_table3 c ON a.key = c.key @@ -279,9 +275,7 @@ POSTHOOK: Input: default@test_table6 POSTHOOK: Input: default@test_table7 #### A masked pattern was here #### 4378 -PREHOOK: query: -- It should be automatically converted to a sort-merge join followed by a groupby in --- a single MR job -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(*) FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key LEFT OUTER JOIN test_table3 c ON a.key = c.key @@ -290,9 +284,7 @@ LEFT OUTER JOIN test_table5 e ON a.key = e.key LEFT OUTER JOIN test_table6 f ON a.key = f.key LEFT OUTER JOIN test_table7 g ON a.key = g.key PREHOOK: type: QUERY -POSTHOOK: query: -- It should be automatically converted to a sort-merge join followed by a groupby in --- a single MR job -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(*) FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key LEFT OUTER JOIN test_table3 c ON a.key = c.key @@ -527,8 +519,7 @@ POSTHOOK: Input: default@test_table7 POSTHOOK: Input: default@test_table8 #### A masked pattern was here #### 13126 -PREHOOK: query: -- outer join with max 16 aliases -EXPLAIN +PREHOOK: query: EXPLAIN SELECT a.* FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key @@ -551,8 +542,7 @@ LEFT OUTER 
JOIN test_table6 r ON a.key = r.key LEFT OUTER JOIN test_table7 s ON a.key = s.key LEFT OUTER JOIN test_table8 t ON a.key = t.key PREHOOK: type: QUERY -POSTHOOK: query: -- outer join with max 16 aliases -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT a.* FROM test_table1 a LEFT OUTER JOIN test_table2 b ON a.key = b.key diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out index d3494de..6ed3c21 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_18.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,15 +30,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1@ds=1 POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: 
type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -207,17 +201,11 @@ POSTHOOK: Input: default@test_table2 POSTHOOK: Input: default@test_table2@ds=1 #### A masked pattern was here #### 253 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, one of the buckets should be empty - -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, one of the buckets should be empty - -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' and a.key = 238 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out index d09daae..fb25015 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_19.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) 
PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 16 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,15 +30,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1@ds=1 POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_2.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_2.q.out index 6b1c202..be4abc6 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_2.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_2.q.out @@ -46,14 +46,10 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select /*+mapjoin(a)*/ * from smb_bucket_1 a join smb_bucket_3 b on a.key = b.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select /*+mapjoin(a)*/ * from smb_bucket_1 a join 
smb_bucket_3 b on a.key = b.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out index 8974d14..a5c1d78 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_20.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key int, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key int, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key int, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key int, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,15 +30,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1@ds=1 POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- with different datatypes. This should be a map-reduce operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- with different datatypes. 
This should be a map-reduce operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -157,15 +151,11 @@ CLUSTERED BY (value1) SORTED BY (value1) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table3 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, although the bucketing positions dont match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.value, a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation, although the bucketing positions dont match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table3 PARTITION (ds = '1') SELECT a.value, a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -280,15 +270,11 @@ POSTHOOK: Input: default@test_table3 POSTHOOK: Input: default@test_table3@ds=1 #### A masked pattern was here #### 253 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- However, since an expression is being selected, it should involve a reducer -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT a.key+a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- However, since an expression is being selected, it should involve a reducer -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '2') SELECT a.key+a.key, a.value, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY diff --git 
a/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out index 7828178..1a9118d 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_21.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) PARTITIONED BY (ds STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,15 +30,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1@ds=1 POSTHOOK: Lineage: test_table1 PARTITION(ds=1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1 PARTITION(ds=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ 
-105,15 +99,11 @@ CLUSTERED BY (key) SORTED BY (key desc) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort orders does not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort orders does not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -192,15 +182,11 @@ CLUSTERED BY (key) SORTED BY (key, value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort columns do not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort columns do not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -278,15 +264,11 @@ CLUSTERED BY (key) SORTED BY (value) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another 
bucketed table --- This should be a map-reduce operation since the sort columns do not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the sort columns do not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -365,15 +347,11 @@ CLUSTERED BY (key) SORTED BY (key) INTO 4 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the number of buckets do not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since the number of buckets do not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY @@ -452,15 +430,11 @@ CLUSTERED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_table2 -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since sort columns do not match -EXPLAIN +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' PREHOOK: type: QUERY -POSTHOOK: query: 
-- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-reduce operation since sort columns do not match -EXPLAIN +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 PARTITION (ds = '1') SELECT a.key, a.value FROM test_table1 a WHERE a.ds = '1' POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out index 4915dab..d8bdef2 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_22.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) +PREHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_table1 -POSTHOOK: query: -- Create two bucketed and sorted tables -CREATE TABLE test_table1 (key INT, value STRING) +POSTHOOK: query: CREATE TABLE test_table1 (key INT, value STRING) CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -32,14 +30,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1 POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN INSERT OVERWRITE TABLE test_table2 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 SELECT * FROM test_table1 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation 
-EXPLAIN INSERT OVERWRITE TABLE test_table2 +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 SELECT * FROM test_table1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -179,14 +173,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_table1 POSTHOOK: Lineage: test_table1.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_table1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN INSERT OVERWRITE TABLE test_table2 +PREHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 SELECT * FROM test_table1 PREHOOK: type: QUERY -POSTHOOK: query: -- Insert data into the bucketed table by selecting from another bucketed table --- This should be a map-only operation -EXPLAIN INSERT OVERWRITE TABLE test_table2 +POSTHOOK: query: EXPLAIN INSERT OVERWRITE TABLE test_table2 SELECT * FROM test_table1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out index eacb438..c3cdff3 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_25.q.out @@ -174,20 +174,10 @@ STAGE PLANS: Warning: Map Join MAPJOIN[29][bigTable=?] in task 'Stage-1:MAPRED' is a cross product Warning: Map Join MAPJOIN[31][bigTable=?] in task 'Stage-1:MAPRED' is a cross product Warning: Map Join MAPJOIN[30][bigTable=?] 
in task 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- explain --- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value - --- select a.key from smb_bucket_1 a - -explain +PREHOOK: query: explain select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 PREHOOK: type: QUERY -POSTHOOK: query: -- explain --- select * from smb_bucket_1 a left outer join smb_bucket_2 b on a.key = b.key left outer join src c on a.key=c.value - --- select a.key from smb_bucket_1 a - -explain +POSTHOOK: query: explain select * from (select a.key from smb_bucket_1 a join smb_bucket_2 b on (a.key = b.key) where a.key = 5) t1 left outer join (select c.key from smb_bucket_2 c join smb_bucket_3 d on (c.key = d.key) where c.key=5) t2 on (t1.key=t2.key) where t2.key=5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_3.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_3.q.out index f6e4f4b..d7e5d40 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_3.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_3.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - - - -create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +PREHOOK: query: create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@smb_bucket_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - - - -create table smb_bucket_1(key int, value string) CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE +POSTHOOK: query: create table smb_bucket_1(key int, value string) 
CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@smb_bucket_1 diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_4.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_4.q.out index 0f8c339..476971b 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_4.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_4.q.out @@ -46,14 +46,10 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select /*+mapjoin(a,b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select /*+mapjoin(a,b)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_5.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_5.q.out index 273ab05..be2ff37 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_5.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_5.q.out @@ -46,14 +46,10 @@ POSTHOOK: query: load data local inpath '../../data/files/smbbucket_3.rc' overwr POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@smb_bucket_3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain select /*+mapjoin(a,c)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join smb_bucket_3 c on b.key=c.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select /*+mapjoin(a,c)*/ * from smb_bucket_1 a join smb_bucket_2 b on a.key = b.key join 
smb_bucket_3 c on b.key=c.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out b/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out index 17821a8..20cfc0d 100644 --- a/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out +++ b/ql/src/test/results/clientpositive/spark/smb_mapjoin_8.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table smb_bucket_input (key int, value string) stored as rcfile +PREHOOK: query: create table smb_bucket_input (key int, value string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@smb_bucket_input -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table smb_bucket_input (key int, value string) stored as rcfile +POSTHOOK: query: create table smb_bucket_input (key int, value string) stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@smb_bucket_input diff --git a/ql/src/test/results/clientpositive/spark/sort.q.out b/ql/src/test/results/clientpositive/spark/sort.q.out index 71ff789..298dc51 100644 --- a/ql/src/test/results/clientpositive/spark/sort.q.out +++ b/ql/src/test/results/clientpositive/spark/sort.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT x.* FROM SRC x SORT BY key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT x.* FROM SRC x SORT BY key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/stats1.q.out b/ql/src/test/results/clientpositive/spark/stats1.q.out index 2f29d27..e691f51 100644 --- a/ql/src/test/results/clientpositive/spark/stats1.q.out +++ b/ql/src/test/results/clientpositive/spark/stats1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, 
value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable @@ -191,17 +187,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Load a file into a existing table --- Some stats (numFiles, totalSize) should be updated correctly --- Some other stats (numRows, rawDataSize) should be cleared -load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tmptable -POSTHOOK: query: -- Load a file into a existing table --- Some stats (numFiles, totalSize) should be updated correctly --- Some other stats (numRows, rawDataSize) should be cleared -load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/spark/stats18.q.out b/ql/src/test/results/clientpositive/spark/stats18.q.out index 3ad9679..4945808 100644 --- a/ql/src/test/results/clientpositive/spark/stats18.q.out +++ b/ql/src/test/results/clientpositive/spark/stats18.q.out @@ -16,16 +16,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=13 POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=13).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=13).value SIMPLE [(src)src.FieldSchema(name:value, 
type:string, comment:default), ] -PREHOOK: query: -- Load a file into a existing partition --- Some stats (numFiles, totalSize) should be updated correctly --- Some other stats (numRows, rawDataSize) should be cleared -desc formatted stats_part partition (ds='2010-04-08', hr='13') +PREHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr='13') PREHOOK: type: DESCTABLE PREHOOK: Input: default@stats_part -POSTHOOK: query: -- Load a file into a existing partition --- Some stats (numFiles, totalSize) should be updated correctly --- Some other stats (numRows, rawDataSize) should be cleared -desc formatted stats_part partition (ds='2010-04-08', hr='13') +POSTHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr='13') POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@stats_part # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out index 7382b31..9e1652b 100644 --- a/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out +++ b/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- test analyze table ... compute statistics noscan - --- 1. test full spec -create table analyze_srcpart like srcpart +PREHOOK: query: create table analyze_srcpart like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@analyze_srcpart -POSTHOOK: query: -- test analyze table ... compute statistics noscan - --- 1. 
test full spec -create table analyze_srcpart like srcpart +POSTHOOK: query: create table analyze_srcpart like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@analyze_srcpart @@ -76,12 +70,10 @@ POSTHOOK: Input: default@analyze_srcpart POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -PREHOOK: query: -- confirm result -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: -- confirm result -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@analyze_srcpart # col_name data_type comment @@ -272,13 +264,11 @@ POSTHOOK: query: drop table analyze_srcpart POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart -PREHOOK: query: -- 2. test partial spec -create table analyze_srcpart_partial like srcpart +PREHOOK: query: create table analyze_srcpart_partial like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@analyze_srcpart_partial -POSTHOOK: query: -- 2. 
test partial spec -create table analyze_srcpart_partial like srcpart +POSTHOOK: query: create table analyze_srcpart_partial like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@analyze_srcpart_partial @@ -338,12 +328,10 @@ POSTHOOK: Input: default@analyze_srcpart_partial@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart_partial POSTHOOK: Output: default@analyze_srcpart_partial@ds=2008-04-08/hr=11 POSTHOOK: Output: default@analyze_srcpart_partial@ds=2008-04-08/hr=12 -PREHOOK: query: -- confirm result -describe formatted analyze_srcpart_partial PARTITION(ds='2008-04-08',hr=11) +PREHOOK: query: describe formatted analyze_srcpart_partial PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE PREHOOK: Input: default@analyze_srcpart_partial -POSTHOOK: query: -- confirm result -describe formatted analyze_srcpart_partial PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: query: describe formatted analyze_srcpart_partial PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@analyze_srcpart_partial # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out b/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out index 40b2a66..1e9603e 100644 --- a/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out +++ b/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- test analyze table compute statistiscs [noscan] on external table --- 1 test table #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@anaylyze_external -POSTHOOK: query: -- test analyze table compute statistiscs [noscan] on external table --- 1 test table #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### @@ -118,15 +114,11 @@ POSTHOOK: query: drop table anaylyze_external 
POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@anaylyze_external POSTHOOK: Output: default@anaylyze_external -PREHOOK: query: -- 2 test partition --- prepare data -create table texternal(key string, val string) partitioned by (insertdate string) +PREHOOK: query: create table texternal(key string, val string) partitioned by (insertdate string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@texternal -POSTHOOK: query: -- 2 test partition --- prepare data -create table texternal(key string, val string) partitioned by (insertdate string) +POSTHOOK: query: create table texternal(key string, val string) partitioned by (insertdate string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@texternal @@ -158,13 +150,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@texternal #### A masked pattern was here #### 500 -PREHOOK: query: -- create external table #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@anaylyze_external -POSTHOOK: query: -- create external table #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### @@ -190,15 +180,13 @@ POSTHOOK: Input: default@anaylyze_external POSTHOOK: Input: default@anaylyze_external@insertdate=2008-01-01 #### A masked pattern was here #### 500 -PREHOOK: query: -- analyze -analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics noscan +PREHOOK: query: analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@anaylyze_external PREHOOK: Input: default@anaylyze_external@insertdate=2008-01-01 PREHOOK: Output: default@anaylyze_external PREHOOK: Output: default@anaylyze_external@insertdate=2008-01-01 -POSTHOOK: query: -- analyze -analyze table anaylyze_external PARTITION (insertdate='2008-01-01') 
compute statistics noscan +POSTHOOK: query: analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@anaylyze_external POSTHOOK: Input: default@anaylyze_external@insertdate=2008-01-01 diff --git a/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out b/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out index cbd5d33..50096a5 100644 --- a/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out +++ b/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out @@ -1,26 +1,10 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- This test uses mapred.max.split.size/mapred.max.split.size for controlling --- number of input splits. --- stats_partscan_1.q is the same test with this but has different result. - --- test analyze table ... compute statistics partialscan - --- 1. prepare data -CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) +PREHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) partitioned by (ds string, hr string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@analyze_srcpart_partial_scan -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- This test uses mapred.max.split.size/mapred.max.split.size for controlling --- number of input splits. --- stats_partscan_1.q is the same test with this but has different result. - --- test analyze table ... compute statistics partialscan - --- 1. prepare data -CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) +POSTHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) partitioned by (ds string, hr string) stored as rcfile POSTHOOK: type: CREATETABLE @@ -90,12 +74,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- 2. 
partialscan -explain +PREHOOK: query: explain analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan PREHOOK: type: QUERY -POSTHOOK: query: -- 2. partialscan -explain +POSTHOOK: query: explain analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -121,12 +103,10 @@ POSTHOOK: Input: default@analyze_srcpart_partial_scan POSTHOOK: Input: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11 POSTHOOK: Output: default@analyze_srcpart_partial_scan POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11 -PREHOOK: query: -- 3. confirm result -describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) +PREHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE PREHOOK: Input: default@analyze_srcpart_partial_scan -POSTHOOK: query: -- 3. confirm result -describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@analyze_srcpart_partial_scan # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/spark/statsfs.q.out b/ql/src/test/results/clientpositive/spark/statsfs.q.out index 9d63bf7..d070e9a 100644 --- a/ql/src/test/results/clientpositive/spark/statsfs.q.out +++ b/ql/src/test/results/clientpositive/spark/statsfs.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- stats computation on partitioned table with analyze command - -create table t1 (key string, value string) partitioned by (ds string) +PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- stats computation on partitioned table with analyze 
command - -create table t1 (key string, value string) partitioned by (ds string) +POSTHOOK: query: create table t1 (key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -130,15 +126,11 @@ POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 -PREHOOK: query: -- stats computation on partitioned table with autogather on insert query - -create table t1 (key string, value string) partitioned by (ds string) +PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- stats computation on partitioned table with autogather on insert query - -create table t1 (key string, value string) partitioned by (ds string) +POSTHOOK: query: create table t1 (key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -248,15 +240,11 @@ POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 -PREHOOK: query: -- analyze stmt on unpartitioned table - -create table t1 (key string, value string) +PREHOOK: query: create table t1 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- analyze stmt on unpartitioned table - -create table t1 (key string, value string) +POSTHOOK: query: create table t1 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -319,15 +307,11 @@ POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 -PREHOOK: query: -- stats computation on unpartitioned table with autogather on insert query - -create table t1 (key string, value string) 
+PREHOOK: query: create table t1 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- stats computation on unpartitioned table with autogather on insert query - -create table t1 (key string, value string) +POSTHOOK: query: create table t1 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -384,17 +368,11 @@ POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 -PREHOOK: query: -- stats computation on partitioned table with autogather on insert query with dynamic partitioning - - -create table t1 (key string, value string) partitioned by (ds string, hr string) +PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string, hr string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- stats computation on partitioned table with autogather on insert query with dynamic partitioning - - -create table t1 (key string, value string) partitioned by (ds string, hr string) +POSTHOOK: query: create table t1 (key string, value string) partitioned by (ds string, hr string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/spark/subquery_exists.q.out b/ql/src/test/results/clientpositive/spark/subquery_exists.q.out index c28a218..b5eb523 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_exists.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_exists.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- no agg, corr --- SORT_QUERY_RESULTS -explain +PREHOOK: query: explain select * from src b where exists @@ -11,11 +7,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 
no agg, corr --- SORT_QUERY_RESULTS -explain +POSTHOOK: query: explain select * from src b where exists @@ -187,8 +179,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- view test -create view cv1 as +PREHOOK: query: create view cv1 as select * from src b where exists @@ -199,8 +190,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@cv1 -POSTHOOK: query: -- view test -create view cv1 as +POSTHOOK: query: create view cv1 as select * from src b where exists @@ -232,8 +222,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- sq in from -select * +PREHOOK: query: select * from (select * from src b where exists @@ -244,8 +233,7 @@ from (select * PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- sq in from -select * +POSTHOOK: query: select * from (select * from src b where exists @@ -267,8 +255,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- upper case in subq -explain +PREHOOK: query: explain select * from src b where exists @@ -277,8 +264,7 @@ where exists where b.VALUE = a.VALUE ) PREHOOK: type: QUERY -POSTHOOK: query: -- upper case in subq -explain +POSTHOOK: query: explain select * from src b where exists diff --git a/ql/src/test/results/clientpositive/spark/subquery_in.q.out b/ql/src/test/results/clientpositive/spark/subquery_in.q.out index 36e3e6e..f6214bc 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_in.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_in.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- non agg, non corr -explain +PREHOOK: query: explain select * from src where src.key in (select key from src s1 where s1.key > '9') PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- non agg, non corr -explain +POSTHOOK: query: explain select * from src where src.key in (select key 
from src s1 where s1.key > '9') @@ -118,8 +112,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- non agg, corr -explain +PREHOOK: query: explain select * from src b where b.key in @@ -128,8 +121,7 @@ where b.key in where b.value = a.value and a.key > '9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr -explain +POSTHOOK: query: explain select * from src b where b.key in @@ -302,8 +294,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- agg, non corr -explain +PREHOOK: query: explain select p_name, p_size from part where part.p_size in @@ -312,8 +303,7 @@ part where part.p_size in where r <= 2 ) PREHOOK: type: QUERY -POSTHOOK: query: -- agg, non corr -explain +POSTHOOK: query: explain select p_name, p_size from part where part.p_size in @@ -480,8 +470,7 @@ POSTHOOK: Input: default@part #### A masked pattern was here #### almond antique medium spring khaki 6 almond antique salmon chartreuse burlywood 6 -PREHOOK: query: -- agg, corr -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size from part b where b.p_size in (select min(p_size) @@ -489,8 +478,7 @@ from part b where b.p_size in where r <= 2 and b.p_mfgr = a.p_mfgr ) PREHOOK: type: QUERY -POSTHOOK: query: -- agg, corr -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size from part b where b.p_size in (select min(p_size) @@ -723,8 +711,7 @@ Manufacturer#2 almond aquamarine midnight light salmon 2 Manufacturer#3 almond antique misty red olive 1 Manufacturer#4 almond aquamarine yellow dodger mint 7 Manufacturer#5 almond antique sky peru orange 2 -PREHOOK: query: -- distinct, corr -explain +PREHOOK: query: explain select * from src b where b.key in @@ -733,8 +720,7 @@ where b.key in where b.value = a.value and a.key > '9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- distinct, corr -explain +POSTHOOK: query: explain select * from src b where b.key in @@ -912,16 +898,14 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 
val_98 -PREHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, p_size +PREHOOK: query: select p_mfgr, p_name, p_size from part where part.p_size in (select first_value(p_size) over(partition by p_mfgr order by p_size) from part) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- non agg, non corr, windowing -select p_mfgr, p_name, p_size +POSTHOOK: query: select p_mfgr, p_name, p_size from part where part.p_size in (select first_value(p_size) over(partition by p_mfgr order by p_size) from part) @@ -934,15 +918,13 @@ Manufacturer#2 almond aquamarine midnight light salmon 2 Manufacturer#3 almond antique misty red olive 1 Manufacturer#4 almond aquamarine yellow dodger mint 7 Manufacturer#5 almond antique sky peru orange 2 -PREHOOK: query: -- non agg, non corr, with join in Parent Query -explain +PREHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr, with join in Parent Query -explain +POSTHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and @@ -1112,16 +1094,14 @@ POSTHOOK: Input: default@lineitem 61336 8855 64128 9141 82704 7721 -PREHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey +PREHOOK: query: select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) PREHOOK: type: QUERY PREHOOK: Input: default@lineitem #### A masked 
pattern was here #### -POSTHOOK: query: -- non agg, corr, with join in Parent Query -select p.p_partkey, li.l_suppkey +POSTHOOK: query: select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) diff --git a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out index b1d225c..3807511 100644 --- a/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out +++ b/ql/src/test/results/clientpositive/spark/subquery_multiinsert.q.out @@ -1,15 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_4( +PREHOOK: query: CREATE TABLE src_4( key STRING, value STRING ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_4 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_4( +POSTHOOK: query: CREATE TABLE src_4( key STRING, value STRING ) diff --git a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out b/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out index 07727d4..923441a 100644 --- a/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out +++ b/ql/src/test/results/clientpositive/spark/table_access_keys_stats.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This test is used for testing the TableAccessAnalyzer - -CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE T1(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 @@ -17,8 +14,7 @@ PREHOOK: query: CREATE TABLE T3(key STRING, val STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T3 -PREHOOK: query: -- 
Simple group-by queries -SELECT key, count(1) FROM T1 GROUP BY key +PREHOOK: query: SELECT key, count(1) FROM T1 GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### @@ -45,8 +41,7 @@ Keys:key,val 7 17 1 8 18 1 8 28 1 -PREHOOK: query: -- With subqueries and column aliases -SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key +PREHOOK: query: SELECT key, count(1) FROM (SELECT key, val FROM T1) subq1 GROUP BY key PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### @@ -72,8 +67,7 @@ Keys:key 3 1 7 1 8 2 -PREHOOK: query: -- With constants -SELECT 1, key, count(1) FROM T1 GROUP BY 1, key +PREHOOK: query: SELECT 1, key, count(1) FROM T1 GROUP BY 1, key PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### @@ -114,8 +108,7 @@ Keys:key,val 7 1 17 2 1 8 1 18 2 1 8 1 28 2 1 -PREHOOK: query: -- no mapping with functions -SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 +PREHOOK: query: SELECT key, key + 1, count(1) FROM T1 GROUP BY key, key + 1 PREHOOK: type: QUERY PREHOOK: Input: default@t1 #### A masked pattern was here #### @@ -139,8 +132,7 @@ Keys:key 2.0 1 4.0 1 6.0 1 -PREHOOK: query: -- group by followed by union -SELECT * FROM ( +PREHOOK: query: SELECT * FROM ( SELECT key, count(1) as c FROM T1 GROUP BY key UNION ALL SELECT key, count(1) as c FROM T1 GROUP BY key @@ -166,8 +158,7 @@ Keys:key 7 1 8 2 8 2 -PREHOOK: query: -- group by followed by a join -SELECT * FROM +PREHOOK: query: SELECT * FROM (SELECT key, count(1) as c FROM T1 GROUP BY key) subq1 JOIN (SELECT key, count(1) as c FROM T1 GROUP BY key) subq2 @@ -211,8 +202,7 @@ Keys:key,val 7 1 7 17 1 8 2 8 18 1 8 2 8 28 1 -PREHOOK: query: -- constants from sub-queries should work fine -SELECT key, constant, val, count(1) from +PREHOOK: query: SELECT key, constant, val, count(1) from (SELECT key, 1 as constant, val from T1) subq1 group by key, constant, val PREHOOK: type: QUERY @@ 
-228,8 +218,7 @@ Keys:key,val 7 1 17 1 8 1 18 1 8 1 28 1 -PREHOOK: query: -- multiple levels of constants from sub-queries should work fine -SELECT key, constant3, val, count(1) FROM +PREHOOK: query: SELECT key, constant3, val, count(1) FROM ( SELECT key, constant AS constant2, val, 2 AS constant3 FROM @@ -252,8 +241,7 @@ Keys:key,val 7 2 17 1 8 2 18 1 8 2 28 1 -PREHOOK: query: -- work with insert overwrite -FROM T1 +PREHOOK: query: FROM T1 INSERT OVERWRITE TABLE T2 SELECT key, count(1) GROUP BY key, 1 INSERT OVERWRITE TABLE T3 SELECT key, sum(val) GROUP BY key PREHOOK: type: QUERY @@ -268,8 +256,7 @@ Operator:GBY_8 Table:default@t1 Keys:key -PREHOOK: query: -- simple joins -SELECT * +PREHOOK: query: SELECT * FROM T1 JOIN T2 ON T1.key = t2.key ORDER BY T1.key ASC, T1.val ASC @@ -302,8 +289,7 @@ Keys:key,val Table:default@t2 Keys:key,val -PREHOOK: query: -- map join -SELECT /*+ MAPJOIN(a) */ * +PREHOOK: query: SELECT /*+ MAPJOIN(a) */ * FROM T1 a JOIN T2 b ON a.key = b.key PREHOOK: type: QUERY @@ -322,8 +308,7 @@ Keys:key 7 17 7 1 8 18 8 2 8 28 8 2 -PREHOOK: query: -- with constant in join condition -SELECT * +PREHOOK: query: SELECT * FROM T1 JOIN T2 ON T1.key = T2.key AND T1.val = 3 and T2.val = 3 PREHOOK: type: QUERY @@ -336,8 +321,7 @@ Keys:key Table:default@t2 Keys:key -PREHOOK: query: -- subqueries -SELECT * +PREHOOK: query: SELECT * FROM ( SELECT val FROM T1 WHERE key = 5 @@ -375,8 +359,7 @@ Keys:val Table:default@t2 Keys:val -PREHOOK: query: -- with column aliases in subqueries -SELECT * +PREHOOK: query: SELECT * FROM ( SELECT val as v FROM T1 WHERE key = 5 @@ -396,8 +379,7 @@ Keys:val Table:default@t2 Keys:val -PREHOOK: query: -- with constants in subqueries -SELECT * +PREHOOK: query: SELECT * FROM ( SELECT key, val FROM T1 @@ -417,8 +399,7 @@ Keys:key Table:default@t2 Keys:key -PREHOOK: query: -- multiple levels of constants in subqueries -SELECT * +PREHOOK: query: SELECT * FROM ( SELECT key, val from @@ -441,8 +422,7 @@ Keys:key Table:default@t2 Keys:key 
-PREHOOK: query: -- no mapping on functions -SELECT * +PREHOOK: query: SELECT * FROM ( SELECT key, val from T1 @@ -456,8 +436,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 #### A masked pattern was here #### -PREHOOK: query: -- join followed by group by -SELECT subq1.val, COUNT(*) +PREHOOK: query: SELECT subq1.val, COUNT(*) FROM ( SELECT key, val FROM T1 @@ -478,8 +457,7 @@ Keys:key Table:default@t2 Keys:key -PREHOOK: query: -- join followed by union -SELECT * +PREHOOK: query: SELECT * FROM ( SELECT subq1.val, COUNT(*) @@ -519,8 +497,7 @@ Keys:val 17.0 1 46.0 1 Warning: Shuffle Join JOIN[20][tables = [$hdt$_0, $hdt$_1]] in Work 'Reducer 4' is a cross product -PREHOOK: query: -- join followed by join -SELECT * +PREHOOK: query: SELECT * FROM ( SELECT subq1.val as val, COUNT(*) @@ -548,8 +525,7 @@ Keys:key Table:default@t2 Keys:key -PREHOOK: query: -- simple joins -SELECT * +PREHOOK: query: SELECT * FROM T1 JOIN T2 ON T1.key = t2.key ORDER BY T1.key ASC, T1.val ASC @@ -582,8 +558,7 @@ Keys:key,val Table:default@t2 Keys:key,val -PREHOOK: query: -- group by followed by a join -SELECT * FROM +PREHOOK: query: SELECT * FROM (SELECT key, count(1) as c FROM T1 GROUP BY key) subq1 JOIN (SELECT key, count(1) as c FROM T1 GROUP BY key) subq2 diff --git a/ql/src/test/results/clientpositive/spark/temp_table_external.q.out b/ql/src/test/results/clientpositive/spark/temp_table_external.q.out index 001cd98..54954b5 100644 --- a/ql/src/test/results/clientpositive/spark/temp_table_external.q.out +++ b/ql/src/test/results/clientpositive/spark/temp_table_external.q.out @@ -20,13 +20,11 @@ POSTHOOK: Input: default@temp_table_external NULL 35 48 NULL 100 100 -PREHOOK: query: -- Even after we drop the table, the data directory should still be there -drop table temp_table_external +PREHOOK: query: drop table temp_table_external PREHOOK: type: DROPTABLE PREHOOK: Input: default@temp_table_external PREHOOK: Output: default@temp_table_external -POSTHOOK: query: 
-- Even after we drop the table, the data directory should still be there -drop table temp_table_external +POSTHOOK: query: drop table temp_table_external POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@temp_table_external POSTHOOK: Output: default@temp_table_external diff --git a/ql/src/test/results/clientpositive/spark/temp_table_gb1.q.out b/ql/src/test/results/clientpositive/spark/temp_table_gb1.q.out index 473245d..05d7ba2 100644 --- a/ql/src/test/results/clientpositive/spark/temp_table_gb1.q.out +++ b/ql/src/test/results/clientpositive/spark/temp_table_gb1.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Taken from groupby2.q -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- Taken from groupby2.q -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git a/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out b/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out index 66d9d23..b7e9a69 100644 --- a/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out +++ b/ql/src/test/results/clientpositive/spark/temp_table_join1.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 +PREHOOK: query: CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src_nontemp -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 +POSTHOOK: query: CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 
POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -24,13 +20,11 @@ POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@src_temp -PREHOOK: query: -- Non temp table join -EXPLAIN +PREHOOK: query: EXPLAIN FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- Non temp table join -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value POSTHOOK: type: QUERY @@ -128,13 +122,11 @@ POSTHOOK: Input: default@src_nontemp 484 val_484 86 val_86 98 val_98 -PREHOOK: query: -- Non temp table join with temp table -EXPLAIN +PREHOOK: query: EXPLAIN FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- Non temp table join with temp table -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value POSTHOOK: type: QUERY @@ -234,13 +226,11 @@ POSTHOOK: Input: default@src_temp 484 val_484 86 val_86 98 val_98 -PREHOOK: query: -- temp table join with temp table -EXPLAIN +PREHOOK: query: EXPLAIN FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- temp table join with temp table -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/tez_join_tests.q.out b/ql/src/test/results/clientpositive/spark/tez_join_tests.q.out index d6d9baa..5818191 100644 --- a/ql/src/test/results/clientpositive/spark/tez_join_tests.q.out +++ b/ql/src/test/results/clientpositive/spark/tez_join_tests.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - 
-explain +PREHOOK: query: explain select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/tez_joins_explain.q.out b/ql/src/test/results/clientpositive/spark/tez_joins_explain.q.out index d383f1d..7082df0 100644 --- a/ql/src/test/results/clientpositive/spark/tez_joins_explain.q.out +++ b/ql/src/test/results/clientpositive/spark/tez_joins_explain.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -explain +PREHOOK: query: explain select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS -explain +POSTHOOK: query: explain select * from (select b.key, b.value from src1 a left outer join src b on (a.key = b.key) order by b.key) x right outer join src c on (x.value = c.value) order by x.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/timestamp_udf.q.out b/ql/src/test/results/clientpositive/spark/timestamp_udf.q.out index 904c894..47f84cb 100644 --- a/ql/src/test/results/clientpositive/spark/timestamp_udf.q.out +++ b/ql/src/test/results/clientpositive/spark/timestamp_udf.q.out @@ -42,15 +42,13 @@ POSTHOOK: Output: default@timestamp_udf POSTHOOK: Output: default@timestamp_udf_string POSTHOOK: Lineage: timestamp_udf.t EXPRESSION [] POSTHOOK: Lineage: timestamp_udf_string.t SIMPLE [] -PREHOOK: query: -- Test UDFs with Timestamp input -select unix_timestamp(t), year(t), month(t), 
day(t), dayofmonth(t), +PREHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), weekofyear(t), hour(t), minute(t), second(t), to_date(t) from timestamp_udf PREHOOK: type: QUERY PREHOOK: Input: default@timestamp_udf #### A masked pattern was here #### -POSTHOOK: query: -- Test UDFs with Timestamp input -select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), +POSTHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), weekofyear(t), hour(t), minute(t), second(t), to_date(t) from timestamp_udf POSTHOOK: type: QUERY @@ -145,15 +143,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@timestamp_udf #### A masked pattern was here #### 2011-05-06 07:08:09.1234567 2011-05-06 12:08:09.1234567 2011-05-06 07:08:09.1234567 2011-05-06 12:08:09.1234567 -PREHOOK: query: -- Test UDFs with string input -select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), +PREHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), weekofyear(t), hour(t), minute(t), second(t), to_date(t) from timestamp_udf_string PREHOOK: type: QUERY PREHOOK: Input: default@timestamp_udf_string #### A masked pattern was here #### -POSTHOOK: query: -- Test UDFs with string input -select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), +POSTHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), weekofyear(t), hour(t), minute(t), second(t), to_date(t) from timestamp_udf_string POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/transform2.q.out b/ql/src/test/results/clientpositive/spark/transform2.q.out index 28d098d..aeeaebf 100644 --- a/ql/src/test/results/clientpositive/spark/transform2.q.out +++ b/ql/src/test/results/clientpositive/spark/transform2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Transform with a function that has many parameters -SELECT TRANSFORM(substr(key, 1, 2)) USING 'cat' FROM src LIMIT 1 +PREHOOK: query: SELECT TRANSFORM(substr(key, 
1, 2)) USING 'cat' FROM src LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Transform with a function that has many parameters -SELECT TRANSFORM(substr(key, 1, 2)) USING 'cat' FROM src LIMIT 1 +POSTHOOK: query: SELECT TRANSFORM(substr(key, 1, 2)) USING 'cat' FROM src LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out b/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out index 0dfd7d0..acf9ef0 100644 --- a/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out +++ b/ql/src/test/results/clientpositive/spark/transform_ppr1.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM ( FROM srcpart src SELECT TRANSFORM(src.ds, src.key, src.value) @@ -9,9 +7,7 @@ FROM ( ) tmap SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100 AND tmap.ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM ( FROM srcpart src SELECT TRANSFORM(src.ds, src.key, src.value) diff --git a/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out b/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out index 3959df6..e5725ad 100644 --- a/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out +++ b/ql/src/test/results/clientpositive/spark/transform_ppr2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM ( FROM srcpart src SELECT TRANSFORM(src.ds, src.key, src.value) @@ -10,9 +8,7 @@ FROM ( ) tmap SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM ( FROM srcpart src SELECT TRANSFORM(src.ds, src.key, src.value) diff --git 
a/ql/src/test/results/clientpositive/spark/truncate_column_buckets.q.out b/ql/src/test/results/clientpositive/spark/truncate_column_buckets.q.out index 7147c22..cab0b83 100644 --- a/ql/src/test/results/clientpositive/spark/truncate_column_buckets.q.out +++ b/ql/src/test/results/clientpositive/spark/truncate_column_buckets.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating columns from a bucketed table, table should remain bucketed - -CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating columns from a bucketed table, table should remain bucketed - -CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab @@ -20,16 +16,14 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_tab POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Check how many rows there are in each bucket, there should be two rows -SELECT cnt FROM ( +PREHOOK: query: SELECT cnt FROM ( SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM test_tab GROUP BY INPUT__FILE__NAME ORDER BY file_name DESC)a PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- Check how many rows there are in each bucket, there should be two rows -SELECT cnt FROM ( +POSTHOOK: query: SELECT cnt FROM ( SELECT INPUT__FILE__NAME 
file_name, count(*) cnt FROM test_tab GROUP BY INPUT__FILE__NAME ORDER BY file_name DESC)a @@ -38,28 +32,22 @@ POSTHOOK: Input: default@test_tab #### A masked pattern was here #### 258 242 -PREHOOK: query: -- Truncate a column on which the table is not bucketed -TRUNCATE TABLE test_tab COLUMNS (value) +PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (value) PREHOOK: type: TRUNCATETABLE PREHOOK: Input: default@test_tab PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Truncate a column on which the table is not bucketed -TRUNCATE TABLE test_tab COLUMNS (value) +POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (value) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab -PREHOOK: query: -- Check how many rows there are in each bucket, this should produce the same rows as before --- because truncate should not break bucketing -SELECT cnt FROM ( +PREHOOK: query: SELECT cnt FROM ( SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM test_tab GROUP BY INPUT__FILE__NAME ORDER BY file_name DESC)a PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- Check how many rows there are in each bucket, this should produce the same rows as before --- because truncate should not break bucketing -SELECT cnt FROM ( +POSTHOOK: query: SELECT cnt FROM ( SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM test_tab GROUP BY INPUT__FILE__NAME ORDER BY file_name DESC)a diff --git a/ql/src/test/results/clientpositive/spark/uber_reduce.q.out b/ql/src/test/results/clientpositive/spark/uber_reduce.q.out index 2a29131..6fa0e6a 100644 --- a/ql/src/test/results/clientpositive/spark/uber_reduce.q.out +++ b/ql/src/test/results/clientpositive/spark/uber_reduce.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- Uberized mode is a YARN option, ignore this test for non-YARN Hadoop versions --- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) - -CREATE TABLE T1(key STRING, val STRING) +PREHOOK: query: CREATE TABLE 
T1(key STRING, val STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@T1 -POSTHOOK: query: -- Uberized mode is a YARN option, ignore this test for non-YARN Hadoop versions --- EXCLUDE_HADOOP_MAJOR_VERSIONS(0.20S) - -CREATE TABLE T1(key STRING, val STRING) +POSTHOOK: query: CREATE TABLE T1(key STRING, val STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@T1 diff --git a/ql/src/test/results/clientpositive/spark/udf_percentile.q.out b/ql/src/test/results/clientpositive/spark/udf_percentile.q.out index 3f8890b..eba7dbe 100644 --- a/ql/src/test/results/clientpositive/spark/udf_percentile.q.out +++ b/ql/src/test/results/clientpositive/spark/udf_percentile.q.out @@ -22,9 +22,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(CAST(substr(value, 5) AS INT), 0.0), percentile(CAST(substr(value, 5) AS INT), 0.5), percentile(CAST(substr(value, 5) AS INT), 1.0), @@ -294,8 +292,7 @@ POSTHOOK: Input: default@src 7 70.0 73.0 78.0 [70.0,73.0,77.91000000000001,78.0] 8 80.0 84.0 87.0 [80.0,84.0,86.92,87.0] 9 90.0 95.0 98.0 [90.0,95.0,98.0,98.0] -PREHOOK: query: -- test null handling -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(NULL, 0.0), percentile(NULL, array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -303,8 +300,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test null handling -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(NULL, 0.0), percentile(NULL, array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -362,8 +358,7 @@ POSTHOOK: Input: default@src 7 NULL NULL 8 NULL NULL 9 NULL NULL -PREHOOK: query: -- test empty array 
handling -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5), percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -371,8 +366,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test empty array handling -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5), percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -439,13 +433,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### NULL -PREHOOK: query: -- test where percentile list is empty -select percentile(cast(key as bigint), array()) from src where false +PREHOOK: query: select percentile(cast(key as bigint), array()) from src where false PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test where percentile list is empty -select percentile(cast(key as bigint), array()) from src where false +POSTHOOK: query: select percentile(cast(key as bigint), array()) from src where false POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union.q.out b/ql/src/test/results/clientpositive/spark/union.q.out index a78504f..fb1ad65 100644 --- a/ql/src/test/results/clientpositive/spark/union.q.out +++ b/ql/src/test/results/clientpositive/spark/union.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map jobs on same input, followed by filesink - -EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM src select src.key, src.value WHERE src.key < 100 UNION ALL @@ -9,10 +6,7 @@ FROM ( ) unioninput INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.* 
PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map jobs on same input, followed by filesink - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM src select src.key, src.value WHERE src.key < 100 UNION ALL diff --git a/ql/src/test/results/clientpositive/spark/union10.q.out b/ql/src/test/results/clientpositive/spark/union10.q.out index ec06fde..2b3afec 100644 --- a/ql/src/test/results/clientpositive/spark/union10.q.out +++ b/ql/src/test/results/clientpositive/spark/union10.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by filesink - -create table tmptable(key string, value int) +PREHOOK: query: create table tmptable(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by filesink - -create table tmptable(key string, value int) +POSTHOOK: query: create table tmptable(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/spark/union11.q.out b/ql/src/test/results/clientpositive/spark/union11.q.out index 882c0d1..ff8cca1 100644 --- a/ql/src/test/results/clientpositive/spark/union11.q.out +++ b/ql/src/test/results/clientpositive/spark/union11.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2 UNION ALL select 'tst3' as key, count(1) as value from src s3) unionsrc group 
by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2 diff --git a/ql/src/test/results/clientpositive/spark/union12.q.out b/ql/src/test/results/clientpositive/spark/union12.q.out index cf2c7b7..e9cd26c 100644 --- a/ql/src/test/results/clientpositive/spark/union12.q.out +++ b/ql/src/test/results/clientpositive/spark/union12.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case: all subqueries are a map-reduce jobs, 3 way union, different inputs for all sub-queries, followed by filesink - -create table tmptable(key string, value int) +PREHOOK: query: create table tmptable(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case: all subqueries are a map-reduce jobs, 3 way union, different inputs for all sub-queries, followed by filesink - -create table tmptable(key string, value int) +POSTHOOK: query: create table tmptable(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/spark/union13.q.out b/ql/src/test/results/clientpositive/spark/union13.q.out index bd30ba3..626863c 100644 --- a/ql/src/test/results/clientpositive/spark/union13.q.out +++ b/ql/src/test/results/clientpositive/spark/union13.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are a map-only jobs, same input, followed by filesink - -explain +PREHOOK: query: explain select unionsrc.key, unionsrc.value FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key 
as key, s2.value as value from src s2) unionsrc PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are a map-only jobs, same input, followed by filesink - -explain +POSTHOOK: query: explain select unionsrc.key, unionsrc.value FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/union14.q.out b/ql/src/test/results/clientpositive/spark/union14.q.out index 64756e6..0b99f68 100644 --- a/ql/src/test/results/clientpositive/spark/union14.q.out +++ b/ql/src/test/results/clientpositive/spark/union14.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select s2.key as key, s2.value as value from src1 s2 UNION ALL select 'tst1' as key, cast(count(1) as string) as value from src s1) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select s2.key as key, s2.value as value from src1 s2 UNION ALL select 'tst1' as key, cast(count(1) as string) as value from src s1) @@ -24,8 +18,8 @@ STAGE PLANS: Stage: Stage-1 Spark Edges: - Reducer 4 <- Map 3 (GROUP, 1) Reducer 2 <- Map 1 (GROUP, 2), Reducer 4 (GROUP, 2) + Reducer 4 <- Map 3 (GROUP, 1) #### A masked pattern was here #### Vertices: Map 1 @@ -128,13 +122,6 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### -238 1 -401 1 -98 1 -146 1 -311 1 -128 1 -278 1 273 1 224 1 255 1 @@ -145,3 +132,10 @@ tst1 1 66 1 406 1 150 1 +238 1 +401 1 +98 1 +146 1 +311 1 +128 1 +278 1 diff --git 
a/ql/src/test/results/clientpositive/spark/union15.q.out b/ql/src/test/results/clientpositive/spark/union15.q.out index 0c5a89b..339bcbb 100644 --- a/ql/src/test/results/clientpositive/spark/union15.q.out +++ b/ql/src/test/results/clientpositive/spark/union15.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2 UNION ALL select s3.key as key, s3.value as value from src1 s3) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2 diff --git a/ql/src/test/results/clientpositive/spark/union16.q.out b/ql/src/test/results/clientpositive/spark/union16.q.out index 7d27a5d..a150893 100644 --- a/ql/src/test/results/clientpositive/spark/union16.q.out +++ b/ql/src/test/results/clientpositive/spark/union16.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) FROM ( SELECT key, value FROM src UNION ALL SELECT key, value FROM src UNION ALL @@ -31,8 +30,7 @@ SELECT count(1) FROM ( SELECT key, value FROM src UNION ALL SELECT key, value FROM src) src PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) FROM ( SELECT key, value FROM src UNION ALL SELECT key, value FROM src UNION ALL diff --git a/ql/src/test/results/clientpositive/spark/union17.q.out b/ql/src/test/results/clientpositive/spark/union17.q.out index 
ce23773..0dfaab4 100644 --- a/ql/src/test/results/clientpositive/spark/union17.q.out +++ b/ql/src/test/results/clientpositive/spark/union17.q.out @@ -14,20 +14,14 @@ POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST2 -PREHOOK: query: -- SORT_QUERY_RESULTS --- union case:map-reduce sub-queries followed by multi-table insert - -explain +PREHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- union case:map-reduce sub-queries followed by multi-table insert - -explain +POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc diff --git a/ql/src/test/results/clientpositive/spark/union18.q.out b/ql/src/test/results/clientpositive/spark/union18.q.out index f9a28bb..1eb5cb2 100644 --- a/ql/src/test/results/clientpositive/spark/union18.q.out +++ b/ql/src/test/results/clientpositive/spark/union18.q.out @@ -14,22 +14,14 @@ POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST2 -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case:map-reduce sub-queries followed by multi-table insert - -explain +PREHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST1 
SELECT unionsrc.key, unionsrc.value INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case:map-reduce sub-queries followed by multi-table insert - -explain +POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc diff --git a/ql/src/test/results/clientpositive/spark/union19.q.out b/ql/src/test/results/clientpositive/spark/union19.q.out index d81c19b..9ff5e7f 100644 --- a/ql/src/test/results/clientpositive/spark/union19.q.out +++ b/ql/src/test/results/clientpositive/spark/union19.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 @@ -18,18 +14,14 @@ POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST2 -PREHOOK: query: -- union case:map-reduce sub-queries followed by multi-table insert - -explain +PREHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value PREHOOK: type: QUERY -POSTHOOK: query: -- union case:map-reduce 
sub-queries followed by multi-table insert - -explain +POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc diff --git a/ql/src/test/results/clientpositive/spark/union2.q.out b/ql/src/test/results/clientpositive/spark/union2.q.out index 8008b3b..f8ccc49 100644 --- a/ql/src/test/results/clientpositive/spark/union2.q.out +++ b/ql/src/test/results/clientpositive/spark/union2.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink - -explain +PREHOOK: query: explain select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink - -explain +POSTHOOK: query: explain select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/union20.q.out b/ql/src/test/results/clientpositive/spark/union20.q.out index 719784a..2141861 100644 --- a/ql/src/test/results/clientpositive/spark/union20.q.out +++ b/ql/src/test/results/clientpositive/spark/union20.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- union :map-reduce sub-queries followed by join - -explain +PREHOOK: query: explain SELECT unionsrc1.key, unionsrc1.value, unionsrc2.key, unionsrc2.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL @@ -12,10 +9,7 @@ JOIN select s4.key as key, s4.value as value from src s4 where s4.key < 10) unionsrc2 ON (unionsrc1.key = unionsrc2.key) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- union :map-reduce sub-queries followed by 
join - -explain +POSTHOOK: query: explain SELECT unionsrc1.key, unionsrc1.value, unionsrc2.key, unionsrc2.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL diff --git a/ql/src/test/results/clientpositive/spark/union21.q.out b/ql/src/test/results/clientpositive/spark/union21.q.out index 4957834..908221a 100644 --- a/ql/src/test/results/clientpositive/spark/union21.q.out +++ b/ql/src/test/results/clientpositive/spark/union21.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- union of constants, udf outputs, and columns from text table and thrift table - -explain +PREHOOK: query: explain SELECT key, count(1) FROM ( SELECT '1' as key from src @@ -16,10 +13,7 @@ FROM ( ) union_output GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- union of constants, udf outputs, and columns from text table and thrift table - -explain +POSTHOOK: query: explain SELECT key, count(1) FROM ( SELECT '1' as key from src diff --git a/ql/src/test/results/clientpositive/spark/union22.q.out b/ql/src/test/results/clientpositive/spark/union22.q.out index e76d51e..f8c2235 100644 --- a/ql/src/test/results/clientpositive/spark/union22.q.out +++ b/ql/src/test/results/clientpositive/spark/union22.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string) +PREHOOK: query: create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dst_union22 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string) +POSTHOOK: query: create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dst_union22 @@ -48,9 
+44,7 @@ POSTHOOK: Lineage: dst_union22_delta PARTITION(ds=1).k2 SIMPLE [(src)src.FieldSc POSTHOOK: Lineage: dst_union22_delta PARTITION(ds=1).k3 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: dst_union22_delta PARTITION(ds=1).k4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dst_union22_delta PARTITION(ds=1).k5 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -explain extended +PREHOOK: query: explain extended insert overwrite table dst_union22 partition (ds='2') select * from ( @@ -63,9 +57,7 @@ where a.k1 > 20 ) subq PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -explain extended +POSTHOOK: query: explain extended insert overwrite table dst_union22 partition (ds='2') select * from ( diff --git a/ql/src/test/results/clientpositive/spark/union24.q.out b/ql/src/test/results/clientpositive/spark/union24.q.out index 5032630..e0523e7 100644 --- a/ql/src/test/results/clientpositive/spark/union24.q.out +++ b/ql/src/test/results/clientpositive/spark/union24.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table src2 as select key, count(1) as count from src group by key +PREHOOK: query: create table src2 as select key, count(1) as count from src group by key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table src2 as select key, count(1) as count from src group by key +POSTHOOK: query: create table src2 as select key, count(1) as count from src group by key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/spark/union26.q.out 
b/ql/src/test/results/clientpositive/spark/union26.q.out index 0b13d4a..e4de33d 100644 --- a/ql/src/test/results/clientpositive/spark/union26.q.out +++ b/ql/src/test/results/clientpositive/spark/union26.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) as counts, key, @@ -22,9 +20,7 @@ WHERE ds='2008-04-08' and hr='11' ) a group by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) as counts, key, diff --git a/ql/src/test/results/clientpositive/spark/union27.q.out b/ql/src/test/results/clientpositive/spark/union27.q.out index bccbbb1..8d5d69a 100644 --- a/ql/src/test/results/clientpositive/spark/union27.q.out +++ b/ql/src/test/results/clientpositive/spark/union27.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -create table jackson_sev_same as select * from src +PREHOOK: query: create table jackson_sev_same as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@jackson_sev_same -POSTHOOK: query: -- SORT_QUERY_RESULTS -create table jackson_sev_same as select * from src +POSTHOOK: query: create table jackson_sev_same as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/spark/union3.q.out b/ql/src/test/results/clientpositive/spark/union3.q.out index 2b6c25e..1bf9571 100644 --- a/ql/src/test/results/clientpositive/spark/union3.q.out +++ b/ql/src/test/results/clientpositive/spark/union3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM ( SELECT 1 AS id @@ -17,9 +15,7 @@ FROM ( CLUSTER BY id ) a PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM ( SELECT 1 AS id diff --git 
a/ql/src/test/results/clientpositive/spark/union31.q.out b/ql/src/test/results/clientpositive/spark/union31.q.out index a1f29eb..7962692 100644 --- a/ql/src/test/results/clientpositive/spark/union31.q.out +++ b/ql/src/test/results/clientpositive/spark/union31.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table t1 +PREHOOK: query: drop table t1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table t1 +POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table t2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/spark/union32.q.out b/ql/src/test/results/clientpositive/spark/union32.q.out index 755e936..5c7759a 100644 --- a/ql/src/test/results/clientpositive/spark/union32.q.out +++ b/ql/src/test/results/clientpositive/spark/union32.q.out @@ -1,19 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- This tests various union queries which have columns on one side of the query --- being of double type and those on the other side another - -CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 +PREHOOK: query: CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- This tests various union queries which have columns on one side of the query --- being of double type and those on the other side another - -CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 +POSTHOOK: query: CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -32,15 +22,13 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@t2 POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: 
query: -- Test simple union with double -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key FROM t1 UNION ALL SELECT CAST(key AS BIGINT) AS key FROM t2) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test simple union with double -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key FROM t1 UNION ALL @@ -130,15 +118,13 @@ POSTHOOK: Input: default@t2 8.0 9.0 9.0 -PREHOOK: query: -- Test union with join on the left -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key UNION ALL SELECT CAST(key AS DOUBLE) AS key FROM t2) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test union with join on the left -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key UNION ALL @@ -278,15 +264,13 @@ POSTHOOK: Input: default@t2 8.0 9.0 9.0 -PREHOOK: query: -- Test union with join on the right -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key FROM t2 UNION ALL SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test union with join on the right -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key FROM t2 UNION ALL @@ -426,15 +410,13 @@ POSTHOOK: Input: default@t2 8.0 9.0 9.0 -PREHOOK: query: -- Test union with join on the left selecting multiple columns -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key UNION ALL SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test union with join on the left selecting multiple columns -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key UNION ALL @@ -574,15 
+556,13 @@ POSTHOOK: Input: default@t2 8.0 8 9.0 9 9.0 9 -PREHOOK: query: -- Test union with join on the right selecting multiple columns -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 UNION ALL SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test union with join on the right selecting multiple columns -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 UNION ALL diff --git a/ql/src/test/results/clientpositive/spark/union33.q.out b/ql/src/test/results/clientpositive/spark/union33.q.out index d0d2567..def5f69 100644 --- a/ql/src/test/results/clientpositive/spark/union33.q.out +++ b/ql/src/test/results/clientpositive/spark/union33.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- This tests that a union all with a map only subquery on one side and a --- subquery involving two map reduce jobs on the other runs correctly. - -CREATE TABLE test_src (key STRING, value STRING) +PREHOOK: query: CREATE TABLE test_src (key STRING, value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_src -POSTHOOK: query: -- SORT_BEFORE_DIFF --- This tests that a union all with a map only subquery on one side and a --- subquery involving two map reduce jobs on the other runs correctly. 
- -CREATE TABLE test_src (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE test_src (key STRING, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_src diff --git a/ql/src/test/results/clientpositive/spark/union34.q.out b/ql/src/test/results/clientpositive/spark/union34.q.out index c49c436..23cf6ee 100644 --- a/ql/src/test/results/clientpositive/spark/union34.q.out +++ b/ql/src/test/results/clientpositive/spark/union34.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -create table src10_1 (key string, value string) +PREHOOK: query: create table src10_1 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src10_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS -create table src10_1 (key string, value string) +POSTHOOK: query: create table src10_1 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src10_1 @@ -62,18 +60,14 @@ POSTHOOK: Lineage: src10_3.key SIMPLE [(src)src.FieldSchema(name:key, type:strin POSTHOOK: Lineage: src10_3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: src10_4.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: src10_4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- When we convert the Join of sub1 and sub0 into a MapJoin, --- we can use a single MR job to evaluate this entire query. 
-explain +PREHOOK: query: explain SELECT * FROM ( SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) UNION ALL SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 ) alias1 PREHOOK: type: QUERY -POSTHOOK: query: -- When we convert the Join of sub1 and sub0 into a MapJoin, --- we can use a single MR job to evaluate this entire query. -explain +POSTHOOK: query: explain SELECT * FROM ( SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) UNION ALL @@ -235,22 +229,14 @@ POSTHOOK: Input: default@src10_4 98 val_98 98 val_98 98 val_98 -PREHOOK: query: -- When we do not convert the Join of sub1 and sub0 into a MapJoin, --- we need to use two MR jobs to evaluate this query. --- The first job is for the Join of sub1 and sub2. The second job --- is for the UNION ALL and ORDER BY. -explain +PREHOOK: query: explain SELECT * FROM ( SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) UNION ALL SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 ) alias1 PREHOOK: type: QUERY -POSTHOOK: query: -- When we do not convert the Join of sub1 and sub0 into a MapJoin, --- we need to use two MR jobs to evaluate this query. --- The first job is for the Join of sub1 and sub2. The second job --- is for the UNION ALL and ORDER BY. 
-explain +POSTHOOK: query: explain SELECT * FROM ( SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) UNION ALL diff --git a/ql/src/test/results/clientpositive/spark/union4.q.out b/ql/src/test/results/clientpositive/spark/union4.q.out index 7530209..45705e9 100644 --- a/ql/src/test/results/clientpositive/spark/union4.q.out +++ b/ql/src/test/results/clientpositive/spark/union4.q.out @@ -1,18 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case: both subqueries are map-reduce jobs on same input, followed by filesink - - -create table tmptable(key string, value int) +PREHOOK: query: create table tmptable(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case: both subqueries are map-reduce jobs on same input, followed by filesink - - -create table tmptable(key string, value int) +POSTHOOK: query: create table tmptable(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/spark/union5.q.out b/ql/src/test/results/clientpositive/spark/union5.q.out index 6da2889..111caa2 100644 --- a/ql/src/test/results/clientpositive/spark/union5.q.out +++ b/ql/src/test/results/clientpositive/spark/union5.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink - -explain +POSTHOOK: query: explain select unionsrc.key, 
count(1) FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2) unionsrc group by unionsrc.key diff --git a/ql/src/test/results/clientpositive/spark/union6.q.out b/ql/src/test/results/clientpositive/spark/union6.q.out index 6176beb..d419c9a 100644 --- a/ql/src/test/results/clientpositive/spark/union6.q.out +++ b/ql/src/test/results/clientpositive/spark/union6.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink - -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/spark/union7.q.out b/ql/src/test/results/clientpositive/spark/union7.q.out index 552fd2d..6cc5d6d 100644 --- a/ql/src/test/results/clientpositive/spark/union7.q.out +++ b/ql/src/test/results/clientpositive/spark/union7.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - 
-explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc group by unionsrc.key @@ -124,13 +118,6 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Input: default@src1 #### A masked pattern was here #### -238 1 -401 1 -98 1 -146 1 -311 1 -128 1 -278 1 273 1 224 1 255 1 @@ -141,3 +128,10 @@ tst1 1 66 1 406 1 150 1 +238 1 +401 1 +98 1 +146 1 +311 1 +128 1 +278 1 diff --git a/ql/src/test/results/clientpositive/spark/union9.q.out b/ql/src/test/results/clientpositive/spark/union9.q.out index 2a0c6a0..53c6e7d 100644 --- a/ql/src/test/results/clientpositive/spark/union9.q.out +++ b/ql/src/test/results/clientpositive/spark/union9.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-only jobs, 3 way union, same input for all sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2 UNION ALL select s3.key as key, s3.value as value from src s3) unionsrc PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-only jobs, 3 way union, same input for all sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2 UNION ALL select s3.key as key, s3.value as value from src s3) unionsrc diff --git a/ql/src/test/results/clientpositive/spark/union_date.q.out b/ql/src/test/results/clientpositive/spark/union_date.q.out index 7ac5c1c..d5a24d5 100644 --- a/ql/src/test/results/clientpositive/spark/union_date.q.out +++ b/ql/src/test/results/clientpositive/spark/union_date.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop 
table union_date_1 +PREHOOK: query: drop table union_date_1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table union_date_1 +POSTHOOK: query: drop table union_date_1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table union_date_2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/spark/union_date_trim.q.out b/ql/src/test/results/clientpositive/spark/union_date_trim.q.out index daa7987..a51a5ac 100644 --- a/ql/src/test/results/clientpositive/spark/union_date_trim.q.out +++ b/ql/src/test/results/clientpositive/spark/union_date_trim.q.out @@ -40,13 +40,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@testdate POSTHOOK: Lineage: testdate.dt EXPRESSION [] POSTHOOK: Lineage: testdate.id SIMPLE [] -PREHOOK: query: --- without the fix following query will throw HiveException: Incompatible types for union operator -insert into table testDate select id, tm from (select id, dt as tm from testDate where id = 1 union all select id, dt as tm from testDate where id = 2 union all select id, cast(trim(Cast (dt as string)) as date) as tm from testDate where id = 3 ) a +PREHOOK: query: insert into table testDate select id, tm from (select id, dt as tm from testDate where id = 1 union all select id, dt as tm from testDate where id = 2 union all select id, cast(trim(Cast (dt as string)) as date) as tm from testDate where id = 3 ) a PREHOOK: type: QUERY PREHOOK: Input: default@testdate PREHOOK: Output: default@testdate -POSTHOOK: query: --- without the fix following query will throw HiveException: Incompatible types for union operator -insert into table testDate select id, tm from (select id, dt as tm from testDate where id = 1 union all select id, dt as tm from testDate where id = 2 union all select id, cast(trim(Cast (dt as string)) as date) as tm from testDate where id = 3 ) a +POSTHOOK: query: insert into table testDate select id, tm from (select id, dt as tm from testDate where id = 1 union all select id, dt as tm from 
testDate where id = 2 union all select id, cast(trim(Cast (dt as string)) as date) as tm from testDate where id = 3 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@testdate POSTHOOK: Output: default@testdate diff --git a/ql/src/test/results/clientpositive/spark/union_null.q.out b/ql/src/test/results/clientpositive/spark/union_null.q.out index 6d06e1d..e196ff3 100644 --- a/ql/src/test/results/clientpositive/spark/union_null.q.out +++ b/ql/src/test/results/clientpositive/spark/union_null.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - --- HIVE-2901 -select x from (select * from (select value as x from src order by x limit 5)a union all select * from (select cast(NULL as string) as x from src limit 5)b )a +PREHOOK: query: select x from (select * from (select value as x from src order by x limit 5)a union all select * from (select cast(NULL as string) as x from src limit 5)b )a PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_BEFORE_DIFF - --- HIVE-2901 -select x from (select * from (select value as x from src order by x limit 5)a union all select * from (select cast(NULL as string) as x from src limit 5)b )a +POSTHOOK: query: select x from (select * from (select value as x from src order by x limit 5)a union all select * from (select cast(NULL as string) as x from src limit 5)b )a POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -30,23 +24,21 @@ POSTHOOK: query: select x from (select * from (select value as x from src order POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -val_0 -val_0 -val_0 -val_10 -val_100 NULL NULL NULL NULL NULL -PREHOOK: query: -- HIVE-4837 -select * from (select * from (select cast(null as string) as N from src1 group by key)a UNION ALL select * from (select cast(null as string) as N from src1 group by key)b ) a +val_0 +val_0 +val_0 +val_10 +val_100 +PREHOOK: query: select * from (select * 
from (select cast(null as string) as N from src1 group by key)a UNION ALL select * from (select cast(null as string) as N from src1 group by key)b ) a PREHOOK: type: QUERY PREHOOK: Input: default@src1 #### A masked pattern was here #### -POSTHOOK: query: -- HIVE-4837 -select * from (select * from (select cast(null as string) as N from src1 group by key)a UNION ALL select * from (select cast(null as string) as N from src1 group by key)b ) a +POSTHOOK: query: select * from (select * from (select cast(null as string) as N from src1 group by key)a UNION ALL select * from (select cast(null as string) as N from src1 group by key)b ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/union_ppr.q.out b/ql/src/test/results/clientpositive/spark/union_ppr.q.out index 01747c6..d3149e1 100644 --- a/ql/src/test/results/clientpositive/spark/union_ppr.q.out +++ b/ql/src/test/results/clientpositive/spark/union_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM ( SELECT X.* FROM SRCPART X WHERE X.key < 100 UNION ALL @@ -9,9 +7,7 @@ SELECT * FROM ( WHERE A.ds = '2008-04-08' SORT BY A.key, A.value, A.ds, A.hr PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM ( SELECT X.* FROM SRCPART X WHERE X.key < 100 UNION ALL diff --git a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out index ff18e4a..384079e 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out @@ -1,30 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no 
need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out index ded8b40..5ccad1c 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out @@ -1,38 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a nested union where one of the sub-queries requires a map-reduce --- job), followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The outer union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a nested union where one of the sub-queries requires a map-reduce --- job), followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The outer union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out index 0fe1340..8aeb902 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out @@ -1,38 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a nested union where also contains map only sub-queries), --- followed by select star and a file sink. --- There is no need for the union optimization, since the whole query can be performed --- in a single map-only job --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a nested union where also contains map only sub-queries), --- followed by select star and a file sink. --- There is no need for the union optimization, since the whole query can be performed --- in a single map-only job --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out index 8ca1432..020497f 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out @@ -1,36 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one is a map-join query), followed by select star and a file sink. --- The union optimization is applied, and the union is removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one is a map-join query), followed by select star and a file sink. 
--- The union optimization is applied, and the union is removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out index 2954f7b..f4107e7 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out @@ -1,36 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a mapred query, and the --- other one is a map-join query), followed by select star and a file sink. --- The union selectstar optimization should be performed, and the union should be removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a mapred query, and the --- other one is a map-join query), followed by select star and a file sink. --- The union selectstar optimization should be performed, and the union should be removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out index 07bd1bb..020497f 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out @@ -1,38 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a join, which should be performed as a map-join query at runtime), --- followed by select star and a file sink. --- The union selectstar optimization should be performed, and the union should be removed. 
- --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a join, which should be performed as a map-join query at runtime), --- followed by select star and a file sink. --- The union selectstar optimization should be performed, and the union should be removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out index c4d1542..658edea 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out @@ -1,36 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- This tests demonstrates that this optimization works in the presence of dynamic partitions. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. 
--- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- This tests demonstrates that this optimization works in the presence of dynamic partitions. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out index 961291b..6ddc9e2 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on --- This test demonstrates that this optimization works in the presence of dynamic partitions. 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on --- This test demonstrates that this optimization works in the presence of dynamic partitions. 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out index baed263..3f16384 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out @@ -1,30 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need for this optimization, since the query is a map-only query. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need for this optimization, since the query is a map-only query. 
--- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out index 535d085..87d3eca 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- This test demonstrates that the optimization works with dynamic partitions irrespective of the --- file format of the output file --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, ds string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- This test demonstrates that the optimization works with dynamic partitions irrespective of the --- file format of the output file --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, ds string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out index 8c06c64..bfa7c14 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- SORT_QUERY_RESULTS - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- SORT_QUERY_RESULTS - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 @@ -214,8 +188,7 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: -- filter should be fine -explain +PREHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key, a.`values` FROM ( @@ -224,8 +197,7 @@ FROM ( SELECT key, count(1) as `values` from inputTbl1 group by key ) a where a.key = 7 PREHOOK: type: QUERY -POSTHOOK: query: -- filter should be fine -explain +POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key, a.`values` FROM ( @@ -345,8 +317,7 @@ POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### 7 1 7 1 -PREHOOK: query: -- 
filters and sub-queries should be fine -explain +PREHOOK: query: explain insert overwrite table outputTbl1 select key, `values` from ( @@ -358,8 +329,7 @@ FROM ( ) a ) b where b.key >= 7 PREHOOK: type: QUERY -POSTHOOK: query: -- filters and sub-queries should be fine -explain +POSTHOOK: query: explain insert overwrite table outputTbl1 select key, `values` from ( diff --git a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out index 755ff3f..6dd8bab 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. 
The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out index bec77a8..f1491de 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, the order of the columns in the select list is different. So, union cannot --- be removed. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which changes the order of --- columns being selected) is pushed above the union. 
- -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, the order of the columns in the select list is different. So, union cannot --- be removed. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which changes the order of --- columns being selected) is pushed above the union. - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out index f5a1caa..d9fcf54 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, all the columns are not selected. So, union cannot --- be removed. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which changes the order of --- columns being selected) is pushed above the union. - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, all the columns are not selected. So, union cannot --- be removed. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which changes the order of --- columns being selected) is pushed above the union. 
- -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out index c762002..af3ea4b 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, some columns are repeated. So, union cannot be removed. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which selects columns from --- both the sub-qeuries of the union) is pushed above the union. - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, some columns are repeated. So, union cannot be removed. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which selects columns from --- both the sub-qeuries of the union) is pushed above the union. - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_23.q.out b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out index b351893..2c21b0a 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_23.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. One of the sub-queries --- would have multiple map-reduce jobs. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. One of the sub-queries --- would have multiple map-reduce jobs. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out index c66691e..2a9e4f6 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out @@ -1,28 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, 
and then read them --- again to process the union. The union can be removed completely. --- One sub-query has a double and the other sub-query has a bigint. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- One sub-query has a double and the other sub-query has a bigint. 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out index ba9d4d1..9fec1d4 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out @@ -1,30 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. 
--- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out index 7692443..7531472 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->remove->filesink optimization --- Union of 3 subqueries is performed (all of which are map-only queries) --- followed by select star and a file sink. --- There is no need for any optimization, since the whole query can be processed in --- a single map-only job --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->remove->filesink optimization --- Union of 3 subqueries is performed (all of which are map-only queries) --- followed by select star and a file sink. 
--- There is no need for any optimization, since the whole query can be processed in --- a single map-only job --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out index 3ab62c8..33e8f51 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out @@ -1,30 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out index 9a9caf6..104ca01 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. 
--- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6.q.out index 1bdeb09..4c1a5e2 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_6.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_6.q.out @@ -1,24 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (all of which are mapred queries) --- followed by select star and a file sink in 2 output tables. --- The optimiaztion does not take affect since it is a multi-table insert. --- It does not matter, whether the output is merged or not. In this case, --- merging is turned off - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (all of which are mapred queries) --- followed by select star and a file sink in 2 output tables. --- The optimiaztion does not take affect since it is a multi-table insert. --- It does not matter, whether the output is merged or not. 
In this case, --- merging is turned off - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out index 10de07d..ac9b1b2 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out @@ -1,26 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (all of which are mapred queries) --- followed by select star and a file sink in 2 output tables. --- The optimiaztion does not take affect since it is a multi-table insert. --- It does not matter, whether the output is merged or not. In this case, --- merging is turned off - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (all of which are mapred queries) --- followed by select star and a file sink in 2 output tables. --- The optimiaztion does not take affect since it is a multi-table insert. --- It does not matter, whether the output is merged or not. 
In this case, --- merging is turned off - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 @@ -239,16 +221,14 @@ POSTHOOK: Input: default@outputtbl2 7 1 8 2 8 2 -PREHOOK: query: -- The following queries guarantee the correctness. -explain +PREHOOK: query: explain select avg(c) from( SELECT count(1)-200 as c from src UNION ALL SELECT count(1) as c from src )subq PREHOOK: type: QUERY -POSTHOOK: query: -- The following queries guarantee the correctness. -explain +POSTHOOK: query: explain select avg(c) from( SELECT count(1)-200 as c from src UNION ALL diff --git a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out index 585b61e..cff02a7 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out index 17079a1..769e00f 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out @@ -1,36 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. 
--- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out index 0b46bd7..ab557b1 100644 --- a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out +++ b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which contains a union and is map-only), --- and the other one is a map-reduce query followed by select star and a file sink. --- There is no need for the outer union. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which contains a union and is map-only), --- and the other one is a map-reduce query followed by select star and a file sink. 
--- There is no need for the outer union. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/spark/union_script.q.out b/ql/src/test/results/clientpositive/spark/union_script.q.out index 44ea01b..b8dfeb1 100644 --- a/ql/src/test/results/clientpositive/spark/union_script.q.out +++ b/ql/src/test/results/clientpositive/spark/union_script.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -select * from ( +PREHOOK: query: select * from ( select transform(key) using 'cat' as cola from src)s PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS -select * from ( +POSTHOOK: query: select * from ( select transform(key) using 'cat' as cola from src)s POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/spark/union_top_level.q.out b/ql/src/test/results/clientpositive/spark/union_top_level.q.out index 9be5361..c9cb5d3 100644 --- a/ql/src/test/results/clientpositive/spark/union_top_level.q.out +++ b/ql/src/test/results/clientpositive/spark/union_top_level.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- top level -explain +PREHOOK: query: explain select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a union all select * from (select key, 1 as value from src where key % 3 == 1 limit 3)b union all select * from (select 
key, 2 as value from src where key % 3 == 2 limit 3)c PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- top level -explain +POSTHOOK: query: explain select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a union all select * from (select key, 1 as value from src where key % 3 == 1 limit 3)b @@ -334,8 +328,7 @@ POSTHOOK: Input: default@src 100 val_100 104 val_104 104 val_104 -PREHOOK: query: -- ctas -explain +PREHOOK: query: explain create table union_top as select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a union all @@ -343,8 +336,7 @@ select * from (select key, 1 as value from src where key % 3 == 1 limit 3)b union all select * from (select key, 2 as value from src where key % 3 == 2 limit 3)c PREHOOK: type: CREATETABLE_AS_SELECT -POSTHOOK: query: -- ctas -explain +POSTHOOK: query: explain create table union_top as select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a union all @@ -554,8 +546,7 @@ PREHOOK: Output: default@union_top POSTHOOK: query: truncate table union_top POSTHOOK: type: TRUNCATETABLE POSTHOOK: Output: default@union_top -PREHOOK: query: -- insert into -explain +PREHOOK: query: explain insert into table union_top select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a union all @@ -563,8 +554,7 @@ select * from (select key, 1 as value from src where key % 3 == 1 limit 3)b union all select * from (select key, 2 as value from src where key % 3 == 2 limit 3)c PREHOOK: type: QUERY -POSTHOOK: query: -- insert into -explain +POSTHOOK: query: explain insert into table union_top select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a union all @@ -964,8 +954,7 @@ POSTHOOK: Input: default@union_top 409 1 484 1 86 2 -PREHOOK: query: -- create view -explain +PREHOOK: query: explain create view union_top_view as select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a union all @@ -973,8 +962,7 @@ select * from (select 
key, 1 as value from src where key % 3 == 1 limit 3)b union all select * from (select key, 2 as value from src where key % 3 == 2 limit 3)c PREHOOK: type: CREATEVIEW -POSTHOOK: query: -- create view -explain +POSTHOOK: query: explain create view union_top_view as select * from (select key, 0 as value from src where key % 3 == 0 limit 3)a union all diff --git a/ql/src/test/results/clientpositive/spark/uniquejoin.q.out b/ql/src/test/results/clientpositive/spark/uniquejoin.q.out index b71d5b1..fcfe4dc 100644 --- a/ql/src/test/results/clientpositive/spark/uniquejoin.q.out +++ b/ql/src/test/results/clientpositive/spark/uniquejoin.q.out @@ -46,18 +46,14 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/T3.txt' INTO TABLE T3 POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@t3 -PREHOOK: query: -- SORT_QUERY_RESULTS - -FROM UNIQUEJOIN PRESERVE T1 a (a.key), PRESERVE T2 b (b.key), PRESERVE T3 c (c.key) +PREHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key), PRESERVE T2 b (b.key), PRESERVE T3 c (c.key) SELECT a.key, b.key, c.key PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 PREHOOK: Input: default@t3 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -FROM UNIQUEJOIN PRESERVE T1 a (a.key), PRESERVE T2 b (b.key), PRESERVE T3 c (c.key) +POSTHOOK: query: FROM UNIQUEJOIN PRESERVE T1 a (a.key), PRESERVE T2 b (b.key), PRESERVE T3 c (c.key) SELECT a.key, b.key, c.key POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 diff --git a/ql/src/test/results/clientpositive/spark/varchar_join1.q.out b/ql/src/test/results/clientpositive/spark/varchar_join1.q.out index 9736fb1..b433a68 100644 --- a/ql/src/test/results/clientpositive/spark/varchar_join1.q.out +++ b/ql/src/test/results/clientpositive/spark/varchar_join1.q.out @@ -76,27 +76,23 @@ POSTHOOK: query: load data local inpath '../../data/files/vc1.txt' into table va POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: 
default@varchar_join1_str -PREHOOK: query: -- Join varchar with same length varchar -select * from varchar_join1_vc1 a join varchar_join1_vc1 b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_vc1 b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@varchar_join1_vc1 #### A masked pattern was here #### -POSTHOOK: query: -- Join varchar with same length varchar -select * from varchar_join1_vc1 a join varchar_join1_vc1 b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_vc1 b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@varchar_join1_vc1 #### A masked pattern was here #### 1 abc 1 abc 2 abc 2 abc 3 abc 3 abc -PREHOOK: query: -- Join varchar with different length varchar -select * from varchar_join1_vc1 a join varchar_join1_vc2 b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_vc2 b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@varchar_join1_vc1 PREHOOK: Input: default@varchar_join1_vc2 #### A masked pattern was here #### -POSTHOOK: query: -- Join varchar with different length varchar -select * from varchar_join1_vc1 a join varchar_join1_vc2 b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_vc2 b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@varchar_join1_vc1 POSTHOOK: Input: default@varchar_join1_vc2 @@ -104,14 +100,12 @@ POSTHOOK: Input: default@varchar_join1_vc2 1 abc 1 abc 2 abc 2 abc 3 abc 3 abc -PREHOOK: query: -- Join varchar with string -select * from varchar_join1_vc1 a join varchar_join1_str b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_str b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY PREHOOK: Input: default@varchar_join1_str PREHOOK: Input: default@varchar_join1_vc1 
#### A masked pattern was here #### -POSTHOOK: query: -- Join varchar with string -select * from varchar_join1_vc1 a join varchar_join1_str b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: select * from varchar_join1_vc1 a join varchar_join1_str b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY POSTHOOK: Input: default@varchar_join1_str POSTHOOK: Input: default@varchar_join1_vc1 diff --git a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out index 500f69c..54f406d 100644 --- a/ql/src/test/results/clientpositive/spark/vector_between_in.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_between_in.q.out @@ -693,13 +693,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_date_test #### A masked pattern was here #### 6172 -PREHOOK: query: -- projections - -EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0 +PREHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0 PREHOOK: type: QUERY -POSTHOOK: query: -- projections - -EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0 +POSTHOOK: query: EXPLAIN SELECT c0, count(1) from (SELECT cdate IN (CAST("1969-10-26" AS DATE), CAST("1969-07-14" AS DATE)) as c0 FROM decimal_date_test) tab GROUP BY c0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out index 424a2c9..e11af12 100644 --- a/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_cast_constant.q.out @@ -6,8 +6,7 @@ PREHOOK: query: 
DROP TABLE over1korc PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE over1korc POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE over1k(t tinyint, +PREHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, @@ -23,8 +22,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@over1k -POSTHOOK: query: -- data setup -CREATE TABLE over1k(t tinyint, +POSTHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, diff --git a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out index 73272fb..5100fc2 100644 --- a/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_count_distinct.q.out @@ -90,9 +90,7 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/web_sales_2k' OVERWRIT POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@web_sales_txt -PREHOOK: query: ------------------------------------------------------------------------------------------ - -create table web_sales +PREHOOK: query: create table web_sales ( ws_sold_date_sk int, ws_sold_time_sk int, @@ -137,9 +135,7 @@ tblproperties ("orc.stripe.size"="33554432", "orc.compress.size"="16384") PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@web_sales -POSTHOOK: query: ------------------------------------------------------------------------------------------ - -create table web_sales +POSTHOOK: query: create table web_sales ( ws_sold_date_sk int, ws_sold_time_sk int, @@ -1229,14 +1225,10 @@ POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_sold_time_sk SIMPLE POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_warehouse_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_warehouse_sk, type:int, comment:null), ] POSTHOOK: Lineage: web_sales 
PARTITION(ws_web_site_sk=9).ws_web_page_sk SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_web_page_sk, type:int, comment:null), ] POSTHOOK: Lineage: web_sales PARTITION(ws_web_site_sk=9).ws_wholesale_cost SIMPLE [(web_sales_txt)web_sales_txt.FieldSchema(name:ws_wholesale_cost, type:decimal(7,2), comment:null), ] -PREHOOK: query: ------------------------------------------------------------------------------------------ - -explain +PREHOOK: query: explain select count(distinct ws_order_number) from web_sales PREHOOK: type: QUERY -POSTHOOK: query: ------------------------------------------------------------------------------------------ - -explain +POSTHOOK: query: explain select count(distinct ws_order_number) from web_sales POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out index dbaf14d..182dad1 100644 --- a/ql/src/test/results/clientpositive/spark/vector_data_types.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_data_types.q.out @@ -6,8 +6,7 @@ PREHOOK: query: DROP TABLE over1korc PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE over1korc POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE over1k(t tinyint, +PREHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, @@ -23,8 +22,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@over1k -POSTHOOK: query: -- data setup -CREATE TABLE over1k(t tinyint, +POSTHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, diff --git a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out index cfdfce1..a36efc2 100644 --- a/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out +++ 
b/ql/src/test/results/clientpositive/spark/vector_decimal_aggregate.q.out @@ -20,20 +20,14 @@ POSTHOOK: Lineage: decimal_vgby.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.F POSTHOOK: Lineage: decimal_vgby.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] POSTHOOK: Lineage: decimal_vgby.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] POSTHOOK: Lineage: decimal_vgby.cint SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cint, type:int, comment:null), ] -PREHOOK: query: -- SORT_QUERY_RESULTS - --- First only do simple aggregations that output primitives only -EXPLAIN SELECT cint, +PREHOOK: query: EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2) FROM decimal_vgby GROUP BY cint HAVING COUNT(*) > 1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- First only do simple aggregations that output primitives only -EXPLAIN SELECT cint, +POSTHOOK: query: EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2) FROM decimal_vgby @@ -129,16 +123,14 @@ POSTHOOK: Input: default@decimal_vgby 6981 3 5831542.2692483780 -515.6210729730 5830511.0271024320 3 6984454.21109769200000 -617.56077692307690 6983219.08954384584620 762 2 5831542.2692483780 1531.2194054054 5833073.4886537834 2 6984454.21109769200000 1833.94569230769250 6986288.15678999969250 NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 -PREHOOK: query: -- Now add the others... 
-EXPLAIN SELECT cint, +PREHOOK: query: EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1), COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2) FROM decimal_vgby GROUP BY cint HAVING COUNT(*) > 1 PREHOOK: type: QUERY -POSTHOOK: query: -- Now add the others... -EXPLAIN SELECT cint, +POSTHOOK: query: EXPLAIN SELECT cint, COUNT(cdecimal1), MAX(cdecimal1), MIN(cdecimal1), SUM(cdecimal1), AVG(cdecimal1), STDDEV_POP(cdecimal1), STDDEV_SAMP(cdecimal1), COUNT(cdecimal2), MAX(cdecimal2), MIN(cdecimal2), SUM(cdecimal2), AVG(cdecimal2), STDDEV_POP(cdecimal2), STDDEV_SAMP(cdecimal2) FROM decimal_vgby diff --git a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out index 803a53b..e17c00b 100644 --- a/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_distinct_2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +PREHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, @@ -19,9 +17,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@vectortab2k -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +POSTHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, diff --git a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out index e13c311..40b6877 100644 --- a/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_groupby_3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +PREHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, @@ 
-19,9 +17,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@vectortab2k -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +POSTHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, diff --git a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out index 14544c5..ea2c9a7 100644 --- a/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_mapjoin_reduce.q.out @@ -1,22 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. --- Query copied from subquery_in.q - --- non agg, non corr, with join in Parent Query -explain +PREHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. 
--- Query copied from subquery_in.q - --- non agg, non corr, with join in Parent Query -explain +POSTHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and @@ -185,15 +173,13 @@ POSTHOOK: Input: default@lineitem 61336 8855 64128 9141 82704 7721 -PREHOOK: query: -- non agg, corr, with join in Parent Query -explain +PREHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr, with join in Parent Query -explain +POSTHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out index 0b04bfc..8e695c6 100644 --- a/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_outer_join0.q.out @@ -131,16 +131,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c PREHOOK: type: QUERY PREHOOK: Input: default@orc_table_1 PREHOOK: Input: default@orc_table_2 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: query: select t1.v1, t1.a, 
t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_table_1 POSTHOOK: Input: default@orc_table_2 @@ -224,16 +220,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c PREHOOK: type: QUERY PREHOOK: Input: default@orc_table_1 PREHOOK: Input: default@orc_table_2 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_table_1 POSTHOOK: Input: default@orc_table_2 diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out index 6b89fb3..b5ebd24 100644 --- a/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_outer_join1.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Using cint and ctinyint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default 
PREHOOK: Output: default@small_alltypesorc1a -POSTHOOK: query: -- Using cint and ctinyint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default @@ -295,18 +293,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select * +PREHOOK: query: select * from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select * +POSTHOOK: query: select * from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -411,18 +405,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select c.ctinyint +PREHOOK: query: select c.ctinyint from small_alltypesorc_a c left outer join small_alltypesorc_a hd on hd.ctinyint = c.ctinyint PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select c.ctinyint +POSTHOOK: query: select c.ctinyint from small_alltypesorc_a c left outer join small_alltypesorc_a hd on hd.ctinyint = c.ctinyint @@ -664,9 +654,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +PREHOOK: query: select count(*), sum(t1.c_ctinyint) from (select 
c.ctinyint as c_ctinyint from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -676,9 +664,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +POSTHOOK: query: select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out index 113c7d0..4262aa1 100644 --- a/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_outer_join2.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Using cint and cbigint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@small_alltypesorc1a -POSTHOOK: query: -- Using cint and cbigint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +POSTHOOK: query: create table small_alltypesorc1a as select * from 
alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default @@ -356,9 +354,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +PREHOOK: query: select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -368,9 +364,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +POSTHOOK: query: select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out index c5a8de5..30a0eee 100644 --- a/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_outer_join3.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Using cint and cstring1 in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, 
cboolean2 limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@small_alltypesorc1a -POSTHOOK: query: -- Using cint and cstring1 in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default @@ -355,9 +353,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +PREHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -367,9 +363,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +POSTHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -509,9 +503,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +PREHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cstring2 = c.cstring2 @@ -521,9 +513,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +POSTHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cstring2 = c.cstring2 @@ -663,9 +653,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +PREHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint @@ -675,9 +663,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +POSTHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out index 94860ab..da12cf4 100644 --- a/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_outer_join4.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Using cint and ctinyint in test queries -create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10 +PREHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: 
default@small_alltypesorc1b -POSTHOOK: query: -- Using cint and ctinyint in test queries -create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10 +POSTHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default @@ -325,18 +323,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select * +PREHOOK: query: select * from small_alltypesorc_b c left outer join small_alltypesorc_b cd on cd.cint = c.cint PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_b #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select * +POSTHOOK: query: select * from small_alltypesorc_b c left outer join small_alltypesorc_b cd on cd.cint = c.cint @@ -476,18 +470,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select c.ctinyint +PREHOOK: query: select c.ctinyint from small_alltypesorc_b c left outer join small_alltypesorc_b hd on hd.ctinyint = c.ctinyint PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_b #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select c.ctinyint +POSTHOOK: query: select c.ctinyint from small_alltypesorc_b c left outer join small_alltypesorc_b hd on hd.ctinyint = c.ctinyint @@ -1033,9 +1023,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.ctinyint +PREHOOK: query: select count(*) from (select c.ctinyint from small_alltypesorc_b c left outer join 
small_alltypesorc_b cd on cd.cint = c.cint @@ -1045,9 +1033,7 @@ left outer join small_alltypesorc_b hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_b #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.ctinyint +POSTHOOK: query: select count(*) from (select c.ctinyint from small_alltypesorc_b c left outer join small_alltypesorc_b cd on cd.cint = c.cint diff --git a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out index bd9b852..9e1742f 100644 --- a/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_outer_join5.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table sorted_mod_4 stored as orc +PREHOOK: query: create table sorted_mod_4 stored as orc as select ctinyint, pmod(cint, 4) as cmodint from alltypesorc where cint is not null and ctinyint is not null order by ctinyint @@ -8,9 +6,7 @@ PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@sorted_mod_4 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table sorted_mod_4 stored as orc +POSTHOOK: query: create table sorted_mod_4 stored as orc as select ctinyint, pmod(cint, 4) as cmodint from alltypesorc where cint is not null and ctinyint is not null order by ctinyint diff --git a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out index 5497426..6dec92a 100644 --- a/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out +++ b/ql/src/test/results/clientpositive/spark/vector_string_concat.q.out @@ -6,8 +6,7 @@ PREHOOK: query: DROP TABLE over1korc PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE over1korc POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE over1k(t tinyint, 
+PREHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, @@ -23,8 +22,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@over1k -POSTHOOK: query: -- data setup -CREATE TABLE over1k(t tinyint, +POSTHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, @@ -158,9 +156,7 @@ sarah garcia sarah garcia | sarah garcia| zach young zach young | zach young| david underhill david underhill | david underhill| yuri carson yuri carson | yuri carson| -PREHOOK: query: ------------------------------------------------------------------------------------------ - -create table vectortab2k( +PREHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, @@ -179,9 +175,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@vectortab2k -POSTHOOK: query: ------------------------------------------------------------------------------------------ - -create table vectortab2k( +POSTHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out index e5f5359..6352512 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- Use ORDER BY clauses to generate 2 stages. -EXPLAIN +PREHOOK: query: EXPLAIN SELECT MIN(ctinyint) as c1, MAX(ctinyint), COUNT(ctinyint), @@ -9,10 +6,7 @@ SELECT MIN(ctinyint) as c1, FROM alltypesorc ORDER BY c1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- Use ORDER BY clauses to generate 2 stages. 
-EXPLAIN +POSTHOOK: query: EXPLAIN SELECT MIN(ctinyint) as c1, MAX(ctinyint), COUNT(ctinyint), @@ -29901,14 +29895,12 @@ POSTHOOK: query: select ctinyint, csmallint, cint, cbigint, cdouble, cdouble, cs POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### -PREHOOK: query: --test to make sure multi and/or expressions are being vectorized -explain extended select * from alltypesorc where +PREHOOK: query: explain extended select * from alltypesorc where (cint=49 and cfloat=3.5) or (cint=47 and cfloat=2.09) or (cint=45 and cfloat=3.02) PREHOOK: type: QUERY -POSTHOOK: query: --test to make sure multi and/or expressions are being vectorized -explain extended select * from alltypesorc where +POSTHOOK: query: explain extended select * from alltypesorc where (cint=49 and cfloat=3.5) or (cint=47 and cfloat=2.09) or (cint=45 and cfloat=3.02) @@ -30182,13 +30174,11 @@ POSTHOOK: Input: default@cast_string_to_int_1 POSTHOOK: Output: default@cast_string_to_int_2 POSTHOOK: Lineage: cast_string_to_int_2.i EXPRESSION [(cast_string_to_int_1)cast_string_to_int_1.FieldSchema(name:c0, type:string, comment:null), ] POSTHOOK: Lineage: cast_string_to_int_2.s SIMPLE [(cast_string_to_int_1)cast_string_to_int_1.FieldSchema(name:value, type:string, comment:null), ] -PREHOOK: query: --moving ALL_1 system test here -select all key from src +PREHOOK: query: select all key from src PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: --moving ALL_1 system test here -select all key from src +POSTHOOK: query: select all key from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/spark/vectorization_1.q.out b/ql/src/test/results/clientpositive/spark/vectorization_1.q.out index 1e4c00c..e0a4344 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_1.q.out +++ 
b/ql/src/test/results/clientpositive/spark/vectorization_1.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT VAR_POP(ctinyint), +PREHOOK: query: SELECT VAR_POP(ctinyint), (VAR_POP(ctinyint) / -26.28), SUM(cfloat), (-1.389 + SUM(cfloat)), @@ -22,9 +20,7 @@ WHERE (((cdouble > ctinyint) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT VAR_POP(ctinyint), +POSTHOOK: query: SELECT VAR_POP(ctinyint), (VAR_POP(ctinyint) / -26.28), SUM(cfloat), (-1.389 + SUM(cfloat)), diff --git a/ql/src/test/results/clientpositive/spark/vectorization_10.q.out b/ql/src/test/results/clientpositive/spark/vectorization_10.q.out index ba281f7..9dad4c4 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_10.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_10.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cdouble, +PREHOOK: query: SELECT cdouble, ctimestamp1, ctinyint, cboolean1, @@ -25,9 +23,7 @@ WHERE (((cstring2 <= '10') PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cdouble, +POSTHOOK: query: SELECT cdouble, ctimestamp1, ctinyint, cboolean1, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_11.q.out b/ql/src/test/results/clientpositive/spark/vectorization_11.q.out index 0fe4c48..dff58da 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_11.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_11.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cstring1, +PREHOOK: query: SELECT cstring1, cboolean1, cdouble, ctimestamp1, @@ -16,9 +14,7 @@ WHERE ((cstring2 = cstring1) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cstring1, +POSTHOOK: query: SELECT cstring1, cboolean1, 
cdouble, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_12.q.out b/ql/src/test/results/clientpositive/spark/vectorization_12.q.out index a199fe8..6a7f69c 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_12.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_12.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cbigint, +PREHOOK: query: SELECT cbigint, cboolean1, cstring1, ctimestamp1, @@ -33,9 +31,7 @@ ORDER BY ctimestamp1, cdouble, cbigint, cstring1 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cbigint, +POSTHOOK: query: SELECT cbigint, cboolean1, cstring1, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out index 6d5c9d9..1a30288 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cboolean1, ctinyint, ctimestamp1, @@ -33,9 +31,7 @@ GROUP BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1 ORDER BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16 LIMIT 40 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cboolean1, ctinyint, ctimestamp1, @@ -254,8 +250,7 @@ NULL -63 1969-12-31 16:00:15.436 -63.0 NULL 63 -63 0 -63.0 -0.0 63.0 -5011.839 0 NULL -64 1969-12-31 16:00:11.912 -64.0 NULL 64 -64 0 -64.0 -0.0 64.0 -5091.392 0.0 64.0 0.0 -10.175 -64.0 0.410625 -64.0 0.0 -64 NULL -64 1969-12-31 16:00:12.339 -64.0 NULL 64 -64 0 -64.0 -0.0 64.0 -5091.392 0.0 64.0 0.0 -10.175 -64.0 0.410625 -64.0 0.0 -64 NULL -64 1969-12-31 16:00:13.274 -64.0 NULL 64 -64 0 -64.0 -0.0 64.0 
-5091.392 0.0 64.0 0.0 -10.175 -64.0 0.410625 -64.0 0.0 -64 -PREHOOK: query: -- double compare timestamp -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cboolean1, ctinyint, ctimestamp1, @@ -288,8 +283,7 @@ GROUP BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1 ORDER BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16 LIMIT 40 PREHOOK: type: QUERY -POSTHOOK: query: -- double compare timestamp -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cboolean1, ctinyint, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_14.q.out b/ql/src/test/results/clientpositive/spark/vectorization_14.q.out index 99192bd..f1fca9f 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_14.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_14.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT ctimestamp1, cfloat, cstring1, @@ -33,9 +31,7 @@ WHERE (((ctinyint <= cbigint) GROUP BY ctimestamp1, cfloat, cstring1, cboolean1, cdouble ORDER BY cstring1, cfloat, cdouble, ctimestamp1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT ctimestamp1, cfloat, cstring1, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out index 0a676bd..42d888f 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cfloat, cboolean1, cdouble, @@ -31,9 +29,7 @@ WHERE (((cstring2 LIKE '%ss%') GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1 ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: 
query: EXPLAIN SELECT cfloat, cboolean1, cdouble, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_16.q.out b/ql/src/test/results/clientpositive/spark/vectorization_16.q.out index f398d46..e6fca7d 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_16.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_16.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cstring1, cdouble, ctimestamp1, @@ -20,9 +18,7 @@ WHERE ((cstring2 LIKE '%b%') OR (cstring1 < 'a'))) GROUP BY cstring1, cdouble, ctimestamp1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cstring1, cdouble, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out index 878dcce..df117b4 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cfloat, cstring1, cint, @@ -24,9 +22,7 @@ WHERE (((cbigint > -23) OR (cfloat = cdouble)))) ORDER BY cbigint, cfloat PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cfloat, cstring1, cint, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_2.q.out b/ql/src/test/results/clientpositive/spark/vectorization_2.q.out index e9df371..709a75f 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_2.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT AVG(csmallint), +PREHOOK: query: SELECT AVG(csmallint), (AVG(csmallint) % -563), (AVG(csmallint) + 762), SUM(cfloat), @@ -24,9 +22,7 @@ WHERE (((ctimestamp1 < ctimestamp2) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc 
#### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT AVG(csmallint), +POSTHOOK: query: SELECT AVG(csmallint), (AVG(csmallint) % -563), (AVG(csmallint) + 762), SUM(cfloat), diff --git a/ql/src/test/results/clientpositive/spark/vectorization_3.q.out b/ql/src/test/results/clientpositive/spark/vectorization_3.q.out index 73b2cdd..2398dee 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_3.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_3.q.out @@ -1,7 +1,5 @@ WARNING: Comparing a bigint and a double may result in a loss of precision. -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT STDDEV_SAMP(csmallint), +PREHOOK: query: SELECT STDDEV_SAMP(csmallint), (STDDEV_SAMP(csmallint) - 10.175), STDDEV_POP(ctinyint), (STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)), @@ -27,9 +25,7 @@ WHERE (((cint <= cfloat) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT STDDEV_SAMP(csmallint), +POSTHOOK: query: SELECT STDDEV_SAMP(csmallint), (STDDEV_SAMP(csmallint) - 10.175), STDDEV_POP(ctinyint), (STDDEV_SAMP(csmallint) * (STDDEV_SAMP(csmallint) - 10.175)), diff --git a/ql/src/test/results/clientpositive/spark/vectorization_4.q.out b/ql/src/test/results/clientpositive/spark/vectorization_4.q.out index 0181588..0d6829f 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_4.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_4.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT SUM(cint), +PREHOOK: query: SELECT SUM(cint), (SUM(cint) * -563), (-3728 + SUM(cint)), STDDEV_POP(cdouble), @@ -24,9 +22,7 @@ WHERE (((csmallint >= cint) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT SUM(cint), +POSTHOOK: query: SELECT SUM(cint), (SUM(cint) * -563), (-3728 + SUM(cint)), 
STDDEV_POP(cdouble), diff --git a/ql/src/test/results/clientpositive/spark/vectorization_5.q.out b/ql/src/test/results/clientpositive/spark/vectorization_5.q.out index f661fc3..914a626 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_5.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_5.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT MAX(csmallint), +PREHOOK: query: SELECT MAX(csmallint), (MAX(csmallint) * -75), COUNT(*), ((MAX(csmallint) * -75) / COUNT(*)), @@ -21,9 +19,7 @@ WHERE (((cboolean2 IS NOT NULL) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT MAX(csmallint), +POSTHOOK: query: SELECT MAX(csmallint), (MAX(csmallint) * -75), COUNT(*), ((MAX(csmallint) * -75) / COUNT(*)), diff --git a/ql/src/test/results/clientpositive/spark/vectorization_6.q.out b/ql/src/test/results/clientpositive/spark/vectorization_6.q.out index 2af0885..13897f6 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_6.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_6.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cboolean1, +PREHOOK: query: SELECT cboolean1, cfloat, cstring1, (988888 * csmallint), @@ -22,9 +20,7 @@ WHERE ((ctinyint != 0) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cboolean1, +POSTHOOK: query: SELECT cboolean1, cfloat, cstring1, (988888 * csmallint), diff --git a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out index f398d46..e6fca7d 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_9.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_9.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cstring1, cdouble, 
ctimestamp1, @@ -20,9 +18,7 @@ WHERE ((cstring2 LIKE '%b%') OR (cstring1 < 'a'))) GROUP BY cstring1, cdouble, ctimestamp1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cstring1, cdouble, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out index a70647e..9bbe22c 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_div0.q.out @@ -1,9 +1,7 @@ -PREHOOK: query: -- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants -explain +PREHOOK: query: explain select cdouble / 0.0 from alltypesorc limit 100 PREHOOK: type: QUERY -POSTHOOK: query: -- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants -explain +POSTHOOK: query: explain select cdouble / 0.0 from alltypesorc limit 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -131,17 +129,11 @@ NULL NULL NULL NULL -PREHOOK: query: -- There are no zeros in the table, but there is 988888, so use it as zero - --- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants as numerators -explain +PREHOOK: query: explain select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100 PREHOOK: type: QUERY -POSTHOOK: query: -- There are no zeros in the table, but there is 988888, so use it as zero - --- TODO: add more stuff here after HIVE-5918 is fixed, such as cbigint and constants as numerators -explain +POSTHOOK: query: explain select (cbigint - 988888L) as s1, cdouble / (cbigint - 988888L) as s2, 1.2 / (cbigint - 988888L) from alltypesorc where cbigint > 0 and cbigint < 100000000 order by s1, s2 limit 100 POSTHOOK: type: QUERY @@ -309,15 +301,11 @@ POSTHOOK: Input: default@alltypesorc 
59347745 NULL 0.000000020219807846111 60229567 NULL 0.000000019923769334088 60330397 NULL 0.000000019890470801974 -PREHOOK: query: -- There are no zeros in the table, but there is -200.0, so use it as zero - -explain +PREHOOK: query: explain select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100 PREHOOK: type: QUERY -POSTHOOK: query: -- There are no zeros in the table, but there is -200.0, so use it as zero - -explain +POSTHOOK: query: explain select (cdouble + 200.0) as s1, cbigint / (cdouble + 200.0) as s2, (cdouble + 200.0) / (cdouble + 200.0), cbigint / (cdouble + 200.0), 3 / (cdouble + 200.0), 1.2 / (cdouble + 200.0) from alltypesorc where cdouble >= -500 and cdouble < -199 order by s1, s2 limit 100 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out index b2aea24..4d8f87b 100644 --- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out @@ -1,36 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- If you look at ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/OrcFileGenerator.java --- which is the data generation class you'll see that those values are specified in the --- initializeFixedPointValues for each data type. When I created the queries I usedthose values --- where I needed scalar values to ensure that when the queries executed their predicates would be --- filtering on values that are guaranteed to exist. - --- Beyond those values, all the other data in the alltypesorc file is random, but there is a --- specific pattern to the data that is important for coverage. 
In orc and subsequently --- vectorization there are a number of optimizations for certain data patterns: AllValues, NoNulls, --- RepeatingValue, RepeatingNull. The data in alltypesorc is generated such that each column has --- exactly 3 batches of each data pattern. This gives us coverage for the vector expression --- optimizations and ensure the metadata in appropriately set on the row batch object which are --- reused across batches. - --- For the queries themselves in order to efficiently cover as much of the new vectorization --- functionality as I could I used a number of different techniques to create the --- vectorization_short_regress.q test suite, primarily equivalence classes, and pairwise --- combinations. - --- First I divided the search space into a number of dimensions such as type, aggregate function, --- filter operation, arithmetic operation, etc. The types were explored as equivalence classes of --- long, double, time, string, and bool. Also, rather than creating a very large number of small --- queries the resulting vectors were grouped by compatible dimensions to reduce the number of --- queries. - --- TargetTypeClasses: Long, Timestamp, Double, String, Bool --- Functions: Avg, Sum, StDevP, StDev, Var, Min, Count --- ArithmeticOps: Add, Multiply, Subtract, Divide --- FilterOps: Equal, NotEqual, GreaterThan, LessThan, LessThanOrEqual --- GroupBy: NoGroupByProjectAggs -EXPLAIN SELECT AVG(cint), +PREHOOK: query: EXPLAIN SELECT AVG(cint), (AVG(cint) + -3728), (-((AVG(cint) + -3728))), (-((-((AVG(cint) + -3728))))), @@ -66,39 +34,7 @@ WHERE ((762 = cbigint) AND ((79.553 != cint) AND (cboolean2 != cboolean1))))) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- If you look at ql/src/test/org/apache/hadoop/hive/ql/exec/vector/util/OrcFileGenerator.java --- which is the data generation class you'll see that those values are specified in the --- initializeFixedPointValues for each data type. 
When I created the queries I usedthose values --- where I needed scalar values to ensure that when the queries executed their predicates would be --- filtering on values that are guaranteed to exist. - --- Beyond those values, all the other data in the alltypesorc file is random, but there is a --- specific pattern to the data that is important for coverage. In orc and subsequently --- vectorization there are a number of optimizations for certain data patterns: AllValues, NoNulls, --- RepeatingValue, RepeatingNull. The data in alltypesorc is generated such that each column has --- exactly 3 batches of each data pattern. This gives us coverage for the vector expression --- optimizations and ensure the metadata in appropriately set on the row batch object which are --- reused across batches. - --- For the queries themselves in order to efficiently cover as much of the new vectorization --- functionality as I could I used a number of different techniques to create the --- vectorization_short_regress.q test suite, primarily equivalence classes, and pairwise --- combinations. - --- First I divided the search space into a number of dimensions such as type, aggregate function, --- filter operation, arithmetic operation, etc. The types were explored as equivalence classes of --- long, double, time, string, and bool. Also, rather than creating a very large number of small --- queries the resulting vectors were grouped by compatible dimensions to reduce the number of --- queries. 
- --- TargetTypeClasses: Long, Timestamp, Double, String, Bool --- Functions: Avg, Sum, StDevP, StDev, Var, Min, Count --- ArithmeticOps: Add, Multiply, Subtract, Divide --- FilterOps: Equal, NotEqual, GreaterThan, LessThan, LessThanOrEqual --- GroupBy: NoGroupByProjectAggs -EXPLAIN SELECT AVG(cint), +POSTHOOK: query: EXPLAIN SELECT AVG(cint), (AVG(cint) + -3728), (-((AVG(cint) + -3728))), (-((-((AVG(cint) + -3728))))), @@ -269,12 +205,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 1.6000018929276082E8 1.5999646129276082E8 -1.5999646129276082E8 1.5999646129276082E8 2.5598867626205912E16 -8706342.964000002 -1.6000018929276082E8 5.481251832900256E8 4.095728233294762E24 8549.657499338187 -5.481251832900256E8 3.8812872199726474E8 2.12743126884874112E17 3.0054786945575034E17 -5.700752675298234 -3.0054786945575034E17 3.0054786945575034E17 973579.3664121237 5.48222463472403E8 -973579.3664121237 -18.377427808018613 -64 2044 -6.573680812059066E-5 18.377427808018613 -PREHOOK: query: -- TargetTypeClasses: Long, Bool, Double, String, Timestamp --- Functions: Max, VarP, StDevP, Avg, Min, StDev, Var --- ArithmeticOps: Divide, Multiply, Remainder, Subtract --- FilterOps: LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual, Like, RLike --- GroupBy: NoGroupByProjectAggs -EXPLAIN SELECT MAX(cint), +PREHOOK: query: EXPLAIN SELECT MAX(cint), (MAX(cint) / -3728), (MAX(cint) * -3728), VAR_POP(cbigint), @@ -307,12 +238,7 @@ WHERE (((cbigint <= 197) OR ((cfloat > 79.553) AND (cstring2 LIKE '10%'))) PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: Long, Bool, Double, String, Timestamp --- Functions: Max, VarP, StDevP, Avg, Min, StDev, Var --- ArithmeticOps: Divide, Multiply, Remainder, Subtract --- FilterOps: LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual, Like, RLike --- GroupBy: NoGroupByProjectAggs -EXPLAIN SELECT MAX(cint), +POSTHOOK: query: EXPLAIN SELECT MAX(cint), (MAX(cint) / -3728), (MAX(cint) * 
-3728), VAR_POP(cbigint), @@ -474,12 +400,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### -20301111 5445.576984978541 -1626869520 7.9684972882908944E16 1626869520 NULL -563 NULL NULL NULL -8.935323383084578 NULL -1069736047 NULL NULL NULL NULL NULL -5445.576984978541 511 5454.512308361625 1626869520 7.2647256545687792E16 -PREHOOK: query: -- TargetTypeClasses: String, Long, Bool, Double, Timestamp --- Functions: VarP, Count, Max, StDevP, StDev, Avg --- ArithmeticOps: Subtract, Remainder, Multiply, Add --- FilterOps: Equal, LessThanOrEqual, GreaterThan, Like, LessThan --- GroupBy: NoGroupByProjectAggs -EXPLAIN SELECT VAR_POP(cbigint), +PREHOOK: query: EXPLAIN SELECT VAR_POP(cbigint), (-(VAR_POP(cbigint))), (VAR_POP(cbigint) - (-(VAR_POP(cbigint)))), COUNT(*), @@ -511,12 +432,7 @@ WHERE ((ctimestamp1 = ctimestamp2) AND ((ctimestamp2 IS NOT NULL) AND (cstring2 > 'a')))) PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: String, Long, Bool, Double, Timestamp --- Functions: VarP, Count, Max, StDevP, StDev, Avg --- ArithmeticOps: Subtract, Remainder, Multiply, Add --- FilterOps: Equal, LessThanOrEqual, GreaterThan, Like, LessThan --- GroupBy: NoGroupByProjectAggs -EXPLAIN SELECT VAR_POP(cbigint), +POSTHOOK: query: EXPLAIN SELECT VAR_POP(cbigint), (-(VAR_POP(cbigint))), (VAR_POP(cbigint) - (-(VAR_POP(cbigint)))), COUNT(*), @@ -676,12 +592,7 @@ POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 2.5109214708345636E18 -2.5109214708345636E18 5.0218429416691272E18 2780 75.198 62 2.5109214708345661E18 2.5109214708345636E18 -1.0 2780 -2780 9460.675803068349 -2.5109214708345636E18 -2118360 1072872630 -2118298 -2.5109214697616911E18 185935.34910862707 0 758 -1.733509234828496 -3728 WARNING: Comparing a bigint and a double may result in a loss of precision. 
-PREHOOK: query: -- TargetTypeClasses: String, Bool, Timestamp, Long, Double --- Functions: Avg, Max, StDev, VarP --- ArithmeticOps: Add, Divide, Remainder, Multiply --- FilterOps: LessThanOrEqual, NotEqual, GreaterThanOrEqual, LessThan, Equal --- GroupBy: NoGroupByProjectAggs -EXPLAIN SELECT AVG(ctinyint), +PREHOOK: query: EXPLAIN SELECT AVG(ctinyint), (AVG(ctinyint) + 6981), ((AVG(ctinyint) + 6981) + AVG(ctinyint)), MAX(cbigint), @@ -703,12 +614,7 @@ WHERE (((ctimestamp2 <= ctimestamp1) AND (ctimestamp1 >= 0)) OR (cfloat = 17)) PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: String, Bool, Timestamp, Long, Double --- Functions: Avg, Max, StDev, VarP --- ArithmeticOps: Add, Divide, Remainder, Multiply --- FilterOps: LessThanOrEqual, NotEqual, GreaterThanOrEqual, LessThan, Equal --- GroupBy: NoGroupByProjectAggs -EXPLAIN SELECT AVG(ctinyint), +POSTHOOK: query: EXPLAIN SELECT AVG(ctinyint), (AVG(ctinyint) + 6981), ((AVG(ctinyint) + 6981) + AVG(ctinyint)), MAX(cbigint), @@ -838,12 +744,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### -0.5934409161894847 6980.406559083811 6979.813118167622 2141851355 -11761.597368421053 -6980.406559083811 1.5852855222071937E8 -0.5934409161894847 2.5099887741860852E16 1.52140608502098816E18 -2141851355 -13.510823917813237 79.553 -3.998255191435157E19 -PREHOOK: query: -- TargetTypeClasses: Timestamp, String, Long, Double, Bool --- Functions: Max, Avg, Min, Var, StDev, Count, StDevP, Sum --- ArithmeticOps: Multiply, Subtract, Add, Divide --- FilterOps: Like, NotEqual, LessThan, GreaterThanOrEqual, GreaterThan, RLike --- GroupBy: NoGroupByProjectColumns -EXPLAIN SELECT cint, +PREHOOK: query: EXPLAIN SELECT cint, cdouble, ctimestamp2, cstring1, @@ -879,12 +780,7 @@ WHERE (((cstring1 RLIKE 'a.*') ORDER BY cint, cdouble, ctimestamp2, cstring1, cboolean2, ctinyint, cfloat, ctimestamp1, csmallint, cbigint, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13 LIMIT 50 PREHOOK: 
type: QUERY -POSTHOOK: query: -- TargetTypeClasses: Timestamp, String, Long, Double, Bool --- Functions: Max, Avg, Min, Var, StDev, Count, StDevP, Sum --- ArithmeticOps: Multiply, Subtract, Add, Divide --- FilterOps: Like, NotEqual, LessThan, GreaterThanOrEqual, GreaterThan, RLike --- GroupBy: NoGroupByProjectColumns -EXPLAIN SELECT cint, +POSTHOOK: query: EXPLAIN SELECT cint, cdouble, ctimestamp2, cstring1, @@ -1099,12 +995,7 @@ NULL -7196.0 1969-12-31 15:59:58.174 NULL false -64 -64.0 1969-12-31 15:59:56.04 NULL -7196.0 1969-12-31 15:59:58.174 NULL false -64 -64.0 1969-12-31 16:00:01.785 -7196 -1639157869 6110780535632 NULL NULL 7196 -14392 -7196 NULL NULL 64.0 6.4051596E8 -6.471915929812072E-5 64 -1.2430156 NULL -7196.0 1969-12-31 15:59:58.174 NULL false -64 -64.0 1969-12-31 16:00:11.912 -7196 -1615920595 6024151978160 NULL NULL 7196 -14392 -7196 NULL NULL 64.0 6.4051596E8 -6.471915929812072E-5 64 -1.2430156 NULL -7196.0 1969-12-31 15:59:58.174 NULL false -64 -64.0 1969-12-31 16:00:12.339 -7196 1805860756 -6732248898368 NULL NULL 7196 -14392 -7196 NULL NULL 64.0 6.4051596E8 -6.471915929812072E-5 64 -1.2430156 -PREHOOK: query: -- TargetTypeClasses: Long, String, Double, Bool, Timestamp --- Functions: VarP, Var, StDev, StDevP, Max, Sum --- ArithmeticOps: Divide, Remainder, Subtract, Multiply --- FilterOps: Equal, LessThanOrEqual, LessThan, Like, GreaterThanOrEqual, NotEqual, GreaterThan --- GroupBy: NoGroupByProjectColumns -EXPLAIN SELECT cint, +PREHOOK: query: EXPLAIN SELECT cint, cbigint, cstring1, cboolean1, @@ -1139,12 +1030,7 @@ WHERE (((197 > ctinyint) ORDER BY cint, cbigint, cstring1, cboolean1, cfloat, cdouble, ctimestamp2, csmallint, cstring2, cboolean2, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15 LIMIT 25 PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: Long, String, Double, Bool, Timestamp --- Functions: VarP, Var, StDev, StDevP, Max, Sum --- ArithmeticOps: Divide, Remainder, Subtract, Multiply --- FilterOps: Equal, 
LessThanOrEqual, LessThan, Like, GreaterThanOrEqual, NotEqual, GreaterThan --- GroupBy: NoGroupByProjectColumns -EXPLAIN SELECT cint, +POSTHOOK: query: EXPLAIN SELECT cint, cbigint, cstring1, cboolean1, @@ -1309,12 +1195,7 @@ POSTHOOK: Input: default@alltypesorc -462839731 988888 ss false -51.0 NULL NULL NULL Lml5J2QBU77 false -468.04059812638036 44.210 468.04059812638036 10.175 51.0 -102.0 -102.0 NULL NULL -988888 417.04059812638036 NULL 3569 NULL NULL -635141101 -89010 ss false -51.0 NULL NULL NULL rVWAj4N1MCg8Scyp7wj2C true 7135.6151106617235 -69.746 -7135.6151106617235 10.175 51.0 -102.0 -102.0 NULL NULL 89010 -7186.6151106617235 NULL 3569 NULL NULL WARNING: Comparing a bigint and a double may result in a loss of precision. -PREHOOK: query: -- TargetTypeClasses: String, Bool, Double, Long, Timestamp --- Functions: Sum, Max, Avg, Var, StDevP, VarP --- ArithmeticOps: Add, Subtract, Divide, Multiply, Remainder --- FilterOps: NotEqual, GreaterThanOrEqual, Like, LessThanOrEqual, Equal, GreaterThan --- GroupBy: NoGroupByProjectColumns -EXPLAIN SELECT cint, +PREHOOK: query: EXPLAIN SELECT cint, cstring1, cboolean2, ctimestamp2, @@ -1348,12 +1229,7 @@ WHERE (((csmallint > -26.28) ORDER BY cboolean1, cstring1, ctimestamp2, cfloat, cbigint, cstring1, cdouble, cint, csmallint, cdouble, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13 LIMIT 75 PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: String, Bool, Double, Long, Timestamp --- Functions: Sum, Max, Avg, Var, StDevP, VarP --- ArithmeticOps: Add, Subtract, Divide, Multiply, Remainder --- FilterOps: NotEqual, GreaterThanOrEqual, Like, LessThanOrEqual, Equal, GreaterThan --- GroupBy: NoGroupByProjectColumns -EXPLAIN SELECT cint, +POSTHOOK: query: EXPLAIN SELECT cint, cstring1, cboolean2, ctimestamp2, @@ -1589,12 +1465,7 @@ NULL NULL true 1969-12-31 15:59:58.456 15601.0 -62.0 667693308 15601 NULL NULL 6 NULL NULL true 1969-12-31 15:59:58.456 15601.0 -63.0 -200542601 15601 NULL NULL -200542538 200542601 
63.0 -401085139 1.0 -15601.0 NULL -2.00558202E8 0.0220476 -7347.0 -15601 NULL NULL NULL true 1969-12-31 15:59:58.456 15601.0 -63.0 -721244708 15601 NULL NULL -721244645 721244708 63.0 -1442489353 1.0 -15601.0 NULL -7.21260309E8 0.0220476 -10478.0 -15601 NULL NULL NULL true 1969-12-31 15:59:58.456 15601.0 -64.0 -1809291815 15601 NULL NULL -1809291751 1809291815 64.0 -3618583566 1.0 -15601.0 NULL -1.809307416E9 0.0217031 -12643.0 -15601 NULL -PREHOOK: query: -- TargetTypeClasses: Long, String, Double, Timestamp --- Functions: Avg, Min, StDevP, Sum, Var --- ArithmeticOps: Divide, Subtract, Multiply, Remainder --- FilterOps: GreaterThan, LessThan, LessThanOrEqual, GreaterThanOrEqual, Like --- GroupBy: NoGroupByProjectColumns -EXPLAIN SELECT ctimestamp1, +PREHOOK: query: EXPLAIN SELECT ctimestamp1, cstring2, cdouble, cfloat, @@ -1621,12 +1492,7 @@ WHERE (((-1.389 >= cint) ORDER BY csmallint, cstring2, cdouble, cfloat, cbigint, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 LIMIT 45 PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: Long, String, Double, Timestamp --- Functions: Avg, Min, StDevP, Sum, Var --- ArithmeticOps: Divide, Subtract, Multiply, Remainder --- FilterOps: GreaterThan, LessThan, LessThanOrEqual, GreaterThanOrEqual, Like --- GroupBy: NoGroupByProjectColumns -EXPLAIN SELECT ctimestamp1, +POSTHOOK: query: EXPLAIN SELECT ctimestamp1, cstring2, cdouble, cfloat, @@ -1810,12 +1676,7 @@ POSTHOOK: Input: default@alltypesorc NULL 4hA4KQj2vD3fI6gX82220d 12329.0 NULL -1887561756 12329 -528876.9279910339 -12586 NULL -12329.0 125447.57500000001 NULL NULL -3104 -12329.0 -1.52004241E8 NULL 4hA4KQj2vD3fI6gX82220d 477.0 NULL -1887561756 477 -528876.9279910339 -734 NULL -477.0 4853.475 NULL NULL -326 -477.0 -227529.0 NULL xH7445Rals48VOulSyR5F 10221.0 NULL -1645852809 10221 -461152.37013168953 -10478 NULL -10221.0 103998.675 NULL NULL 5022 -10221.0 -1.04468841E8 -PREHOOK: query: -- TargetTypeClasses: Double, String, Long --- Functions: StDev, Sum, VarP, Count --- 
ArithmeticOps: Remainder, Divide, Subtract --- FilterOps: GreaterThanOrEqual, Equal, LessThanOrEqual --- GroupBy: GroupBy -EXPLAIN SELECT csmallint, +PREHOOK: query: EXPLAIN SELECT csmallint, (csmallint % -75) as c1, STDDEV_SAMP(csmallint) as c2, (-1.389 / csmallint) as c3, @@ -1835,12 +1696,7 @@ GROUP BY csmallint ORDER BY csmallint, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 LIMIT 20 PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: Double, String, Long --- Functions: StDev, Sum, VarP, Count --- ArithmeticOps: Remainder, Divide, Subtract --- FilterOps: GreaterThanOrEqual, Equal, LessThanOrEqual --- GroupBy: GroupBy -EXPLAIN SELECT csmallint, +POSTHOOK: query: EXPLAIN SELECT csmallint, (csmallint % -75) as c1, STDDEV_SAMP(csmallint) as c2, (-1.389 / csmallint) as c3, @@ -2003,12 +1859,7 @@ POSTHOOK: Input: default@alltypesorc -89 -14 0.0 0.015606742 NULL NULL 14 0.0 -14 1 89011 -95 -20 0.0 0.014621053 NULL NULL 20 0.0 -20 1 89011 WARNING: Comparing a bigint and a double may result in a loss of precision. 
-PREHOOK: query: -- TargetTypeClasses: Long, Double, Timestamp --- Functions: Var, Count, Sum, VarP, StDevP --- ArithmeticOps: Multiply, Add, Subtract, Remainder --- FilterOps: GreaterThan, LessThan, Equal, LessThanOrEqual, GreaterThanOrEqual --- GroupBy: GroupBy -EXPLAIN SELECT cdouble, +PREHOOK: query: EXPLAIN SELECT cdouble, VAR_SAMP(cdouble), (2563.58 * VAR_SAMP(cdouble)), (-(VAR_SAMP(cdouble))), @@ -2035,12 +1886,7 @@ WHERE (((cdouble > 2563.58)) GROUP BY cdouble ORDER BY cdouble PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: Long, Double, Timestamp --- Functions: Var, Count, Sum, VarP, StDevP --- ArithmeticOps: Multiply, Add, Subtract, Remainder --- FilterOps: GreaterThan, LessThan, Equal, LessThanOrEqual, GreaterThanOrEqual --- GroupBy: GroupBy -EXPLAIN SELECT cdouble, +POSTHOOK: query: EXPLAIN SELECT cdouble, VAR_SAMP(cdouble), (2563.58 * VAR_SAMP(cdouble)), (-(VAR_SAMP(cdouble))), @@ -2201,12 +2047,7 @@ ORDER BY cdouble POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### -PREHOOK: query: -- TargetTypeClasses: Bool, Timestamp, String, Double, Long --- Functions: StDevP, Avg, Count, Min, Var, VarP, Sum --- ArithmeticOps: Multiply, Subtract, Add, Divide, Remainder --- FilterOps: NotEqual, LessThan, Like, Equal, RLike --- GroupBy: GroupBy -EXPLAIN SELECT ctimestamp1, +PREHOOK: query: EXPLAIN SELECT ctimestamp1, cstring1, STDDEV_POP(cint) as c1, (STDDEV_POP(cint) * 10.175) as c2, @@ -2261,12 +2102,7 @@ GROUP BY ctimestamp1, cstring1 ORDER BY ctimestamp1, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16, c17, c18, c19, c20, c21, c22, c23, c24, c25, c26, c27, c28, c29, c30, c31, c32, c33, c34, c35, c36, c37 LIMIT 50 PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: Bool, Timestamp, String, Double, Long --- Functions: StDevP, Avg, Count, Min, Var, VarP, Sum --- ArithmeticOps: Multiply, Subtract, Add, Divide, Remainder --- FilterOps: NotEqual, LessThan, Like, Equal, 
RLike --- GroupBy: GroupBy -EXPLAIN SELECT ctimestamp1, +POSTHOOK: query: EXPLAIN SELECT ctimestamp1, cstring1, STDDEV_POP(cint) as c1, (STDDEV_POP(cint) * 10.175) as c2, @@ -2563,12 +2399,7 @@ POSTHOOK: Input: default@alltypesorc 1969-12-31 15:59:46.82 NULL NULL NULL NULL 15601.0 NULL NULL 1 -1 NULL -46 NULL NULL 0.0 NULL NULL NULL 0.0 6.522017819370554E-4 NULL 0.0 NULL NULL -46.0 NULL 6.522017819364598E-4 46 15601.0 0.0 NULL NULL -0.571304 0.0 NULL NULL 1 1 NULL 1969-12-31 15:59:46.847 NULL NULL NULL NULL -7196.0 NULL NULL 1 -1 NULL -26 NULL NULL 0.0 NULL NULL NULL 0.0 -0.0014139799888827128 NULL 0.0 NULL NULL -26.0 NULL 0.001413979988882123 26 -7196.0 0.0 NULL NULL -1.010769 0.0 NULL NULL 1 1 NULL 1969-12-31 15:59:46.915 NULL NULL NULL NULL -200.0 NULL NULL 1 -1 NULL -25 NULL NULL 0.0 NULL NULL NULL 0.0 -0.050875000000000004 NULL 0.0 NULL NULL -25.0 NULL 0.0 25 -200.0 0.0 NULL NULL -1.051200 0.0 NULL NULL 1 1 NULL -PREHOOK: query: -- TargetTypeClasses: Double, Long, String, Timestamp, Bool --- Functions: Max, Sum, Var, Avg, Min, VarP, StDev, StDevP --- ArithmeticOps: Divide, Subtract, Remainder, Add, Multiply --- FilterOps: GreaterThan, LessThanOrEqual, Equal, LessThan, GreaterThanOrEqual, NotEqual, Like, RLike --- GroupBy: GroupBy -EXPLAIN SELECT cboolean1, +PREHOOK: query: EXPLAIN SELECT cboolean1, MAX(cfloat), (-(MAX(cfloat))), (-26.28 / MAX(cfloat)), @@ -2608,12 +2439,7 @@ WHERE (((cboolean1 IS NOT NULL)) GROUP BY cboolean1 ORDER BY cboolean1 PREHOOK: type: QUERY -POSTHOOK: query: -- TargetTypeClasses: Double, Long, String, Timestamp, Bool --- Functions: Max, Sum, Var, Avg, Min, VarP, StDev, StDevP --- ArithmeticOps: Divide, Subtract, Remainder, Add, Multiply --- FilterOps: GreaterThan, LessThanOrEqual, Equal, LessThan, GreaterThanOrEqual, NotEqual, Like, RLike --- GroupBy: GroupBy -EXPLAIN SELECT cboolean1, +POSTHOOK: query: EXPLAIN SELECT cboolean1, MAX(cfloat), (-(MAX(cfloat))), (-26.28 / MAX(cfloat)), @@ -2814,13 +2640,11 @@ POSTHOOK: Input: 
default@alltypesorc #### A masked pattern was here #### false 11.0 -11.0 -2.389090909090909 -17881597706 -17881597716.175 3.8953387713327066E17 6.0 -0.8249998 -2454.8879999999995 3.8953385925167296E17 -2145884705 1.66288903197104486E18 0.8249998 4.7840233756130287E-17 4.098424268084119E-17 0.8249998 -1051696618 28.692556844886422 2980633855.245 -4032330473.245 85.79562278396777 4032330473.245 -3983699.3106060605 3983699.3106060605 4.1896430920933255E15 true 79.553 -79.553 -0.33034580136836733 -401322621137 -401322621147.175 7.9255373737244976E16 34.727455139160156 -69.378 4856.6352637899645 7.9254972414623824E16 -2130544867 2.30133924842409523E18 69.378 3.456813247089758E-17 2.0387240975807185E-18 69.378 2182477964777 34.654968050508266 2959326820.263 2179518637956.737 9461.197516216069 -2179518637956.737 4.592756659884259E8 -4.592756659884259E8 1.002359020778021E21 -PREHOOK: query: -- These tests verify COUNT on empty or null colulmns work correctly. -create table test_count(i int) stored as orc +PREHOOK: query: create table test_count(i int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_count -POSTHOOK: query: -- These tests verify COUNT on empty or null colulmns work correctly. 
-create table test_count(i int) stored as orc +POSTHOOK: query: create table test_count(i int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_count diff --git a/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out index 7c4467a..d605185 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_mapjoin.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint) +PREHOOK: query: EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint) FROM alltypesorc t1 JOIN alltypesorc t2 ON t1.cint = t2.cint PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint) +POSTHOOK: query: EXPLAIN SELECT COUNT(t1.cint), MAX(t2.cint), MIN(t1.cint), AVG(t1.cint+t2.cint) FROM alltypesorc t1 JOIN alltypesorc t2 ON t1.cint = t2.cint POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out index a3d5947..9cfd789 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_nested_mapjoin.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain select sum(t1.td) from (select v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint +PREHOOK: query: explain select sum(t1.td) from (select v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint PREHOOK: type: QUERY -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - -explain select sum(t1.td) from (select v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint +POSTHOOK: query: explain select sum(t1.td) from (select v1.csmallint as tsi, v1.cdouble as td from alltypesorc v1, alltypesorc v2 where v1.ctinyint=v2.ctinyint) t1 join alltypesorc v3 on t1.tsi=v3.csmallint POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-2 is a root stage diff --git a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out index 2ab2541..8130912 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_ptf.q.out @@ -1,21 +1,12 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE part_staging +PREHOOK: query: DROP TABLE part_staging PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -DROP TABLE part_staging +POSTHOOK: query: DROP TABLE part_staging POSTHOOK: type: DROPTABLE PREHOOK: query: DROP TABLE part_orc PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE part_orc POSTHOOK: type: DROPTABLE -PREHOOK: query: -- NOTE: This test is a copy of ptf. --- NOTE: We cannot vectorize "pure" table functions (e.g. NOOP) -- given their blackbox nature. So only queries without table functions and --- NOTE: with windowing will be vectorized. - --- data setup -CREATE TABLE part_staging( +PREHOOK: query: CREATE TABLE part_staging( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -29,12 +20,7 @@ CREATE TABLE part_staging( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_staging -POSTHOOK: query: -- NOTE: This test is a copy of ptf. --- NOTE: We cannot vectorize "pure" table functions (e.g. NOOP) -- given their blackbox nature. So only queries without table functions and --- NOTE: with windowing will be vectorized. 
- --- data setup -CREATE TABLE part_staging( +POSTHOOK: query: CREATE TABLE part_staging( p_partkey INT, p_name STRING, p_mfgr STRING, @@ -118,9 +104,7 @@ POSTHOOK: Lineage: part_orc.p_partkey SIMPLE [(part_staging)part_staging.FieldSc POSTHOOK: Lineage: part_orc.p_retailprice SIMPLE [(part_staging)part_staging.FieldSchema(name:p_retailprice, type:double, comment:null), ] POSTHOOK: Lineage: part_orc.p_size SIMPLE [(part_staging)part_staging.FieldSchema(name:p_size, type:int, comment:null), ] POSTHOOK: Lineage: part_orc.p_type SIMPLE [(part_staging)part_staging.FieldSchema(name:p_type, type:string, comment:null), ] -PREHOOK: query: --1. test1 - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -130,9 +114,7 @@ from noop(on part_orc order by p_name ) PREHOOK: type: QUERY -POSTHOOK: query: --1. test1 - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -371,18 +353,14 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 2. testJoinWithNoop - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j distribute by j.p_mfgr sort by j.p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 2. 
testJoinWithNoop - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, p_size - lag(p_size,1,p_size) over (partition by p_mfgr order by p_name) as deltaSz from noop (on (select p1.* from part_orc p1 join part_orc p2 on p1.p_partkey = p2.p_partkey) j @@ -694,17 +672,13 @@ Manufacturer#5 almond antique medium spring khaki 6 -25 Manufacturer#5 almond antique sky peru orange 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 -23 -PREHOOK: query: -- 3. testOnlyPTF - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 3. testOnlyPTF - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size from noop(on part_orc partition by p_mfgr @@ -880,9 +854,7 @@ Manufacturer#5 almond antique medium spring khaki 6 Manufacturer#5 almond antique sky peru orange 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- 4. testPTFAlias - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -892,9 +864,7 @@ from noop(on part_orc order by p_name ) abc PREHOOK: type: QUERY -POSTHOOK: query: -- 4. 
testPTFAlias - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1133,9 +1103,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 5. testPTFAndWhereWithWindowing - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1145,9 +1113,7 @@ from noop(on part_orc order by p_name ) PREHOOK: type: QUERY -POSTHOOK: query: -- 5. testPTFAndWhereWithWindowing - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1387,9 +1353,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 23 -23 -PREHOOK: query: -- 6. testSWQAndPTFAndGBy - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1400,9 +1364,7 @@ from noop(on part_orc ) group by p_mfgr, p_name, p_size PREHOOK: type: QUERY -POSTHOOK: query: -- 6. 
testSWQAndPTFAndGBy - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -1653,18 +1615,14 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 23 -23 -PREHOOK: query: -- 7. testJoin - -explain extended +PREHOOK: query: explain extended select abc.* from noop(on part_orc partition by p_mfgr order by p_name ) abc join part_orc p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -POSTHOOK: query: -- 7. testJoin - -explain extended +POSTHOOK: query: explain extended select abc.* from noop(on part_orc partition by p_mfgr @@ -1934,18 +1892,14 @@ POSTHOOK: Input: default@part_orc 85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull 86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully 90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl -PREHOOK: query: -- 8. testJoinRight - -explain extended +PREHOOK: query: explain extended select abc.* from part_orc p1 join noop(on part_orc partition by p_mfgr order by p_name ) abc on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -POSTHOOK: query: -- 8. 
testJoinRight - -explain extended +POSTHOOK: query: explain extended select abc.* from part_orc p1 join noop(on part_orc partition by p_mfgr @@ -2219,18 +2173,14 @@ POSTHOOK: Input: default@part_orc 85768 almond antique chartreuse lavender yellow Manufacturer#1 Brand#12 LARGE BRUSHED STEEL 34 SM BAG 1753.76 refull 86428 almond aquamarine burnished black steel Manufacturer#1 Brand#12 STANDARD ANODIZED STEEL 28 WRAP BAG 1414.42 arefully 90681 almond antique chartreuse khaki white Manufacturer#3 Brand#31 MEDIUM BURNISHED TIN 17 SM CASE 1671.68 are slyly after the sl -PREHOOK: query: -- 9. testNoopWithMap - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r from noopwithmap(on part_orc partition by p_mfgr order by p_name, p_size desc) PREHOOK: type: QUERY -POSTHOOK: query: -- 9. testNoopWithMap - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name, p_size desc) as r from noopwithmap(on part_orc @@ -2461,9 +2411,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 Manufacturer#5 almond antique sky peru orange 2 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 -PREHOOK: query: -- 10. testNoopWithMapWithWindowing - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2472,9 +2420,7 @@ from noopwithmap(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 10. 
testNoopWithMapWithWindowing - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2726,9 +2672,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 11. testHavingWithWindowingPTFNoGBY - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2737,9 +2681,7 @@ from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 11. testHavingWithWindowingPTFNoGBY - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2975,9 +2917,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 12. testFunctionChain - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -2987,9 +2927,7 @@ partition by p_mfgr order by p_mfgr, p_name ))) PREHOOK: type: QUERY -POSTHOOK: query: -- 12. 
testFunctionChain - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -3290,9 +3228,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 13. testPTFAndWindowingInSubQ - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, sub1.cd, sub1.s1 from (select p_mfgr, p_name, @@ -3305,9 +3241,7 @@ order by p_name) window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) ) sub1 PREHOOK: type: QUERY -POSTHOOK: query: -- 13. testPTFAndWindowingInSubQ - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, sub1.cd, sub1.s1 from (select p_mfgr, p_name, @@ -3547,9 +3481,7 @@ Manufacturer#5 almond antique medium spring khaki 2 6208.18 Manufacturer#5 almond antique sky peru orange 3 7672.66 Manufacturer#5 almond aquamarine dodger light gainsboro 4 5882.970000000001 Manufacturer#5 almond azure blanched chiffon midnight 5 4271.3099999999995 -PREHOOK: query: -- 14. testPTFJoinWithWindowingWithCount - -explain extended +PREHOOK: query: explain extended select abc.p_mfgr, abc.p_name, rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, @@ -3561,9 +3493,7 @@ partition by p_mfgr order by p_name ) abc join part_orc p1 on abc.p_partkey = p1.p_partkey PREHOOK: type: QUERY -POSTHOOK: query: -- 14. 
testPTFJoinWithWindowingWithCount - -explain extended +POSTHOOK: query: explain extended select abc.p_mfgr, abc.p_name, rank() over (distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over (distribute by abc.p_mfgr sort by abc.p_name) as dr, @@ -3916,17 +3846,13 @@ Manufacturer#5 almond antique medium spring khaki 2 2 2 1611.66 3401.35000000000 Manufacturer#5 almond antique sky peru orange 3 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 15. testDistinctInSelectWithPTF - -explain extended +PREHOOK: query: explain extended select DISTINCT p_mfgr, p_name, p_size from noop(on part_orc partition by p_mfgr order by p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 15. testDistinctInSelectWithPTF - -explain extended +POSTHOOK: query: explain extended select DISTINCT p_mfgr, p_name, p_size from noop(on part_orc partition by p_mfgr @@ -4124,8 +4050,7 @@ Manufacturer#5 almond antique medium spring khaki 6 Manufacturer#5 almond antique sky peru orange 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 Manufacturer#5 almond azure blanched chiffon midnight 23 -PREHOOK: query: -- 16. testViewAsTableInputToPTF -create view IF NOT EXISTS mfgr_price_view as +PREHOOK: query: create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s from part_orc @@ -4134,8 +4059,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part_orc PREHOOK: Output: database:default PREHOOK: Output: default@mfgr_price_view -POSTHOOK: query: -- 16. 
testViewAsTableInputToPTF -create view IF NOT EXISTS mfgr_price_view as +POSTHOOK: query: create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, sum(p_retailprice) as s from part_orc @@ -4375,8 +4299,7 @@ Manufacturer#4 Brand#42 2581.6800000000003 7337.620000000001 Manufacturer#5 Brand#51 1611.66 1611.66 Manufacturer#5 Brand#52 3254.17 4865.83 Manufacturer#5 Brand#53 2806.83 7672.66 -PREHOOK: query: -- 17. testMultipleInserts2SWQsWithPTF -CREATE TABLE part_4( +PREHOOK: query: CREATE TABLE part_4( p_mfgr STRING, p_name STRING, p_size INT, @@ -4386,8 +4309,7 @@ s DOUBLE) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_4 -POSTHOOK: query: -- 17. testMultipleInserts2SWQsWithPTF -CREATE TABLE part_4( +POSTHOOK: query: CREATE TABLE part_4( p_mfgr STRING, p_name STRING, p_size INT, @@ -4972,9 +4894,7 @@ Manufacturer#5 almond antique medium spring khaki 6 8 2 2 0.4 31 Manufacturer#5 almond antique sky peru orange 2 2 3 3 0.6 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 4 4 0.8 6 Manufacturer#5 almond azure blanched chiffon midnight 23 23 5 5 1.0 2 -PREHOOK: query: -- 18. testMulti2OperatorsFunctionChainWithMap - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -4991,9 +4911,7 @@ from noop(on partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 18. 
testMulti2OperatorsFunctionChainWithMap - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -5322,9 +5240,7 @@ Manufacturer#5 almond antique medium spring khaki 1 1 6 6 Manufacturer#5 almond antique sky peru orange 1 1 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 -PREHOOK: query: -- 19. testMulti3OperatorsFunctionChain - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -5341,9 +5257,7 @@ from noop(on partition by p_mfgr order by p_mfgr ) PREHOOK: type: QUERY -POSTHOOK: query: -- 19. testMulti3OperatorsFunctionChain - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -5672,9 +5586,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 6 37 Manufacturer#5 almond antique sky peru orange 3 3 2 39 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 85 Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 108 -PREHOOK: query: -- 20. testMultiOperatorChainWithNoWindowing - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -5689,9 +5601,7 @@ from noop(on partition by p_mfgr order by p_mfgr)) PREHOOK: type: QUERY -POSTHOOK: query: -- 20. 
testMultiOperatorChainWithNoWindowing - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -5990,9 +5900,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 6 37 Manufacturer#5 almond antique sky peru orange 3 3 2 39 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 46 85 Manufacturer#5 almond azure blanched chiffon midnight 5 5 23 108 -PREHOOK: query: -- 21. testMultiOperatorChainEndsWithNoopMap - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -6009,9 +5917,7 @@ from noopwithmap(on partition by p_mfgr,p_name order by p_mfgr,p_name) PREHOOK: type: QUERY -POSTHOOK: query: -- 21. testMultiOperatorChainEndsWithNoopMap - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name) as dr, @@ -6357,9 +6263,7 @@ Manufacturer#5 almond antique medium spring khaki 1 1 6 6 Manufacturer#5 almond antique sky peru orange 1 1 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 -PREHOOK: query: -- 22. testMultiOperatorChainWithDiffPartitionForWindow1 - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, @@ -6375,9 +6279,7 @@ from noop(on order by p_mfgr )) PREHOOK: type: QUERY -POSTHOOK: query: -- 22. 
testMultiOperatorChainWithDiffPartitionForWindow1 - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as r, dense_rank() over (partition by p_mfgr,p_name order by p_mfgr,p_name) as dr, @@ -6696,9 +6598,7 @@ Manufacturer#5 almond antique medium spring khaki 1 1 6 6 6 Manufacturer#5 almond antique sky peru orange 1 1 2 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 1 1 46 46 46 Manufacturer#5 almond azure blanched chiffon midnight 1 1 23 23 23 -PREHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2 - -explain extended +PREHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, @@ -6712,9 +6612,7 @@ from noopwithmap(on order by p_mfgr, p_name) )) PREHOOK: type: QUERY -POSTHOOK: query: -- 23. testMultiOperatorChainWithDiffPartitionForWindow2 - -explain extended +POSTHOOK: query: explain extended select p_mfgr, p_name, rank() over (partition by p_mfgr order by p_name) as r, dense_rank() over (partition by p_mfgr order by p_name) as dr, diff --git a/ql/src/test/results/clientpositive/spark/vectorized_rcfile_columnar.q.out b/ql/src/test/results/clientpositive/spark/vectorized_rcfile_columnar.q.out index c35173e..c4cf9aa 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_rcfile_columnar.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_rcfile_columnar.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: --This query must pass even when vectorized reader is not available for ---RC files. The query must fall back to the non-vector mode and run successfully. 
- -CREATE table columnTable (key STRING, value STRING) +PREHOOK: query: CREATE table columnTable (key STRING, value STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS @@ -10,10 +7,7 @@ STORED AS PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@columnTable -POSTHOOK: query: --This query must pass even when vectorized reader is not available for ---RC files. The query must fall back to the non-vector mode and run successfully. - -CREATE table columnTable (key STRING, value STRING) +POSTHOOK: query: CREATE table columnTable (key STRING, value STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS diff --git a/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out index 535da25..97f12d4 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_shufflejoin.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint) +PREHOOK: query: EXPLAIN SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint) FROM alltypesorc t1 JOIN alltypesorc t2 ON t1.cint = t2.cint order by CNT PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint) +POSTHOOK: query: EXPLAIN SELECT COUNT(t1.cint) AS CNT, MAX(t2.cint) , MIN(t1.cint), AVG(t1.cint+t2.cint) FROM alltypesorc t1 JOIN alltypesorc t2 ON t1.cint = t2.cint order by CNT POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out index bfac939..560235d 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out +++ 
b/ql/src/test/results/clientpositive/spark/vectorized_string_funcs.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- Test string functions in vectorized mode to verify end-to-end functionality. - -explain +PREHOOK: query: explain select substr(cstring1, 1, 2) ,substr(cstring1, 2) @@ -16,15 +14,13 @@ select ,concat(cstring1, '<') ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) from alltypesorc --- Limit the number of rows of output to a reasonable amount. + where cbigint % 237 = 0 --- Test function use in the WHERE clause. + and length(substr(cstring1, 1, 2)) <= 2 and cstring1 like '%' PREHOOK: type: QUERY -POSTHOOK: query: -- Test string functions in vectorized mode to verify end-to-end functionality. - -explain +POSTHOOK: query: explain select substr(cstring1, 1, 2) ,substr(cstring1, 2) @@ -40,9 +36,9 @@ select ,concat(cstring1, '<') ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) from alltypesorc --- Limit the number of rows of output to a reasonable amount. + where cbigint % 237 = 0 --- Test function use in the WHERE clause. + and length(substr(cstring1, 1, 2)) <= 2 and cstring1 like '%' POSTHOOK: type: QUERY @@ -78,9 +74,9 @@ PREHOOK: query: select ,concat(cstring1, '<') ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) from alltypesorc --- Limit the number of rows of output to a reasonable amount. + where cbigint % 237 = 0 --- Test function use in the WHERE clause. + and length(substr(cstring1, 1, 2)) <= 2 and cstring1 like '%' PREHOOK: type: QUERY @@ -101,9 +97,9 @@ POSTHOOK: query: select ,concat(cstring1, '<') ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) from alltypesorc --- Limit the number of rows of output to a reasonable amount. + where cbigint % 237 = 0 --- Test function use in the WHERE clause. 
+ and length(substr(cstring1, 1, 2)) <= 2 and cstring1 like '%' POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out index 5eb896f..9948112 100644 --- a/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out +++ b/ql/src/test/results/clientpositive/spark/vectorized_timestamp_funcs.q.out @@ -1,18 +1,8 @@ -PREHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. --- Turning on vectorization has been temporarily moved after filling the test table --- due to bug HIVE-8197. - - -CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC +PREHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@alltypesorc_string -POSTHOOK: query: -- Test timestamp functions in vectorized mode to verify they run correctly end-to-end. --- Turning on vectorization has been temporarily moved after filling the test table --- due to bug HIVE-8197. 
- - -CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC +POSTHOOK: query: CREATE TABLE alltypesorc_string(ctimestamp1 timestamp, stimestamp1 string) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@alltypesorc_string @@ -432,8 +422,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Should all be true or NULL -SELECT +PREHOOK: query: SELECT to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1) AS c1, year(ctimestamp1) = year(stimestamp1), month(ctimestamp1) = month(stimestamp1), @@ -448,8 +437,7 @@ ORDER BY c1 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc_string #### A masked pattern was here #### -POSTHOOK: query: -- Should all be true or NULL -SELECT +POSTHOOK: query: SELECT to_unix_timestamp(ctimestamp1) = to_unix_timestamp(stimestamp1) AS c1, year(ctimestamp1) = year(stimestamp1), month(ctimestamp1) = month(stimestamp1), @@ -504,8 +492,7 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- Wrong format. Should all be NULL. -EXPLAIN SELECT +PREHOOK: query: EXPLAIN SELECT to_unix_timestamp(stimestamp1) AS c1, year(stimestamp1), month(stimestamp1), @@ -518,8 +505,7 @@ EXPLAIN SELECT FROM alltypesorc_wrong ORDER BY c1 PREHOOK: type: QUERY -POSTHOOK: query: -- Wrong format. Should all be NULL. -EXPLAIN SELECT +POSTHOOK: query: EXPLAIN SELECT to_unix_timestamp(stimestamp1) AS c1, year(stimestamp1), month(stimestamp1), @@ -697,13 +683,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc_string #### A masked pattern was here #### NULL NULL 0 40 -PREHOOK: query: -- SUM of timestamps are not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)... 
-EXPLAIN SELECT +PREHOOK: query: EXPLAIN SELECT round(sum(ctimestamp1), 3) FROM alltypesorc_string PREHOOK: type: QUERY -POSTHOOK: query: -- SUM of timestamps are not vectorized reduce-side because they produce a double instead of a long (HIVE-8211)... -EXPLAIN SELECT +POSTHOOK: query: EXPLAIN SELECT round(sum(ctimestamp1), 3) FROM alltypesorc_string POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/spark/windowing.q.out b/ql/src/test/results/clientpositive/spark/windowing.q.out index 72b2245..aeeeaa5 100644 --- a/ql/src/test/results/clientpositive/spark/windowing.q.out +++ b/ql/src/test/results/clientpositive/spark/windowing.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 1. testWindowing -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -9,10 +6,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 1. testWindowing -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -46,8 +40,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 2. 
testGroupByWithPartitioning -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name)as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -57,8 +50,7 @@ group by p_mfgr, p_name, p_size PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 2. testGroupByWithPartitioning -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name)as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -93,8 +85,7 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 1788.73 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 5 5 23 -23 -PREHOOK: query: -- 3. testGroupByHavingWithSWQ -select p_mfgr, p_name, p_size, min(p_retailprice), +PREHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -104,8 +95,7 @@ having p_size > 0 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 3. 
testGroupByHavingWithSWQ -select p_mfgr, p_name, p_size, min(p_retailprice), +POSTHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice), rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -140,15 +130,13 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 1788.73 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 5 5 23 -23 -PREHOOK: query: -- 4. testCount -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, count(p_size) over(distribute by p_mfgr sort by p_name) as cd from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 4. testCount -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, count(p_size) over(distribute by p_mfgr sort by p_name) as cd from part POSTHOOK: type: QUERY @@ -180,8 +168,7 @@ Manufacturer#5 almond antique medium spring khaki 2 Manufacturer#5 almond antique sky peru orange 3 Manufacturer#5 almond aquamarine dodger light gainsboro 4 Manufacturer#5 almond azure blanched chiffon midnight 5 -PREHOOK: query: -- 5. testCountWithWindowingUDAF -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, count(p_size) over(distribute by p_mfgr sort by p_name) as cd, @@ -191,8 +178,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 5. 
testCountWithWindowingUDAF -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, count(p_size) over(distribute by p_mfgr sort by p_name) as cd, @@ -228,8 +214,7 @@ Manufacturer#5 almond antique medium spring khaki 2 2 2 1611.66 3401.35000000000 Manufacturer#5 almond antique sky peru orange 3 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 6. testCountInSubQ -select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz +PREHOOK: query: select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz from (select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -241,8 +226,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 6. testCountInSubQ -select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz +POSTHOOK: query: select sub1.r, sub1.dr, sub1.cd, sub1.s1, sub1.deltaSz from (select p_mfgr, p_name, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, @@ -280,8 +264,7 @@ POSTHOOK: Input: default@part 5 5 5 7672.66 -23 5 5 5 8923.62 -7 6 5 6 8749.730000000001 14 -PREHOOK: query: -- 7. 
testJoinWithWindowingAndPTF -select abc.p_mfgr, abc.p_name, +PREHOOK: query: select abc.p_mfgr, abc.p_name, rank() over(distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over(distribute by abc.p_mfgr sort by abc.p_name) as dr, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, @@ -293,8 +276,7 @@ order by p_name PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 7. testJoinWithWindowingAndPTF -select abc.p_mfgr, abc.p_name, +POSTHOOK: query: select abc.p_mfgr, abc.p_name, rank() over(distribute by abc.p_mfgr sort by abc.p_name) as r, dense_rank() over(distribute by abc.p_mfgr sort by abc.p_name) as dr, abc.p_retailprice, sum(abc.p_retailprice) over (distribute by abc.p_mfgr sort by abc.p_name rows between unbounded preceding and current row) as s1, @@ -334,15 +316,13 @@ Manufacturer#5 almond antique medium spring khaki 2 2 1611.66 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 3 3 1788.73 5190.08 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 4 4 1018.1 6208.18 46 44 Manufacturer#5 almond azure blanched chiffon midnight 5 5 1464.48 7672.66 23 -23 -PREHOOK: query: -- 8. testMixedCaseAlias -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name, p_size desc) as R from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 8. 
testMixedCaseAlias -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name, p_size desc) as R from part POSTHOOK: type: QUERY @@ -374,8 +354,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 Manufacturer#5 almond antique sky peru orange 2 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 -PREHOOK: query: -- 9. testHavingWithWindowingNoGBY -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -383,8 +362,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 9. testHavingWithWindowingNoGBY -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -418,8 +396,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 10. 
testHavingWithWindowingCondRankNoGBY -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -427,8 +404,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 10. testHavingWithWindowingCondRankNoGBY -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between unbounded preceding and current row) as s1 @@ -462,8 +438,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 3 3 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 7672.66 -PREHOOK: query: -- 11. testFirstLast -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2, first_value(p_size) over w1 as f, last_value(p_size, false) over w1 as l @@ -472,8 +447,7 @@ window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 11. 
testFirstLast -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2, first_value(p_size) over w1 as f, last_value(p_size, false) over w1 as l @@ -508,8 +482,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6 31 46 Manufacturer#5 almond antique sky peru orange 2 2 31 23 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 6 23 Manufacturer#5 almond azure blanched chiffon midnight 23 23 2 23 -PREHOOK: query: -- 12. testFirstLastWithWhere -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2, first_value(p_size) over w1 as f, @@ -520,8 +493,7 @@ window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 12. testFirstLastWithWhere -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2, first_value(p_size) over w1 as f, @@ -537,8 +509,7 @@ Manufacturer#3 almond antique forest lavender goldenrod 14 2 14 17 1 Manufacturer#3 almond antique metallic orange dim 19 3 19 17 45 Manufacturer#3 almond antique misty red olive 1 4 1 14 45 Manufacturer#3 almond antique olive coral navajo 45 5 45 19 45 -PREHOOK: query: -- 13. 
testSumWindow -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2 from part @@ -546,8 +517,7 @@ window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 13. testSumWindow -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over (distribute by p_mfgr sort by p_name rows between current row and current row) as s2 from part @@ -581,16 +551,14 @@ Manufacturer#5 almond antique medium spring khaki 6 85 6 Manufacturer#5 almond antique sky peru orange 2 108 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 46 Manufacturer#5 almond azure blanched chiffon midnight 23 71 23 -PREHOOK: query: -- 14. testNoSortClause -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 14. testNoSortClause -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) @@ -623,8 +591,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 Manufacturer#5 almond antique sky peru orange 2 3 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 -PREHOOK: query: -- 15. 
testExpressions -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, cume_dist() over(distribute by p_mfgr sort by p_name) as cud, @@ -641,8 +608,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 15. testExpressions -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, cume_dist() over(distribute by p_mfgr sort by p_name) as cud, @@ -685,8 +651,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 0.4 0.25 1 2 18.5 12.5 1 Manufacturer#5 almond antique sky peru orange 2 3 3 0.6 0.5 2 3 13.0 12.832251036613439 1 2 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 0.8 0.75 2 4 21.25 18.102140757380052 1 46 6 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 1.0 1.0 3 5 21.6 16.206171663906314 1 23 2 -PREHOOK: query: -- 16. testMultipleWindows -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, cume_dist() over(distribute by p_mfgr sort by p_name) as cud, @@ -698,8 +663,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 16. 
testMultipleWindows -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, cume_dist() over(distribute by p_mfgr sort by p_name) as cud, @@ -737,8 +701,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 0.4 37 8 31 Manufacturer#5 almond antique sky peru orange 2 3 3 0.6 39 2 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 0.8 85 46 6 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 1.0 108 23 2 -PREHOOK: query: -- 17. testCountStar -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, count(*) over(distribute by p_mfgr sort by p_name ) as c, count(p_size) over(distribute by p_mfgr sort by p_name) as ca, first_value(p_size) over w1 as fvW1 @@ -747,8 +710,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 17. testCountStar -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, count(*) over(distribute by p_mfgr sort by p_name ) as c, count(p_size) over(distribute by p_mfgr sort by p_name) as ca, first_value(p_size) over w1 as fvW1 @@ -783,8 +745,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 31 Manufacturer#5 almond antique sky peru orange 2 3 3 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 2 -PREHOOK: query: -- 18. 
testUDAFs -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_retailprice) over w1 as s, min(p_retailprice) over w1 as mi, max(p_retailprice) over w1 as ma, @@ -794,8 +755,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 18. testUDAFs -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_retailprice) over w1 as s, min(p_retailprice) over w1 as mi, max(p_retailprice) over w1 as ma, @@ -831,8 +791,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6208.18 1018.1 1789.69 1552. Manufacturer#5 almond antique sky peru orange 2 7672.66 1018.1 1789.69 1534.532 Manufacturer#5 almond aquamarine dodger light gainsboro 46 5882.969999999999 1018.1 1788.73 1470.7424999999998 Manufacturer#5 almond azure blanched chiffon midnight 23 4271.3099999999995 1018.1 1788.73 1423.7699999999998 -PREHOOK: query: -- 19. testUDAFsWithGBY -select p_mfgr,p_name, p_size, p_retailprice, +PREHOOK: query: select p_mfgr,p_name, p_size, p_retailprice, sum(p_retailprice) over w1 as s, min(p_retailprice) as mi , max(p_retailprice) as ma , @@ -843,8 +802,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 19. 
testUDAFsWithGBY -select p_mfgr,p_name, p_size, p_retailprice, +POSTHOOK: query: select p_mfgr,p_name, p_size, p_retailprice, sum(p_retailprice) over w1 as s, min(p_retailprice) as mi , max(p_retailprice) as ma , @@ -880,8 +838,7 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 6208.18 1611.66 1611 Manufacturer#5 almond antique sky peru orange 2 1788.73 7672.66 1788.73 1788.73 1534.532 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 5882.969999999999 1018.1 1018.1 1470.7424999999998 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 4271.3099999999995 1464.48 1464.48 1423.7699999999998 -PREHOOK: query: -- 20. testSTATs -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, stddev(p_retailprice) over w1 as sdev, stddev_pop(p_retailprice) over w1 as sdev_pop, collect_set(p_size) over w1 as uniq_size, @@ -893,8 +850,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 20. testSTATs -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, stddev(p_retailprice) over w1 as sdev, stddev_pop(p_retailprice) over w1 as sdev_pop, collect_set(p_size) over w1 as uniq_size, @@ -932,8 +888,7 @@ Manufacturer#5 almond antique medium spring khaki 6 316.68049612345885 316.68049 Manufacturer#5 almond antique sky peru orange 2 285.40506298242155 285.40506298242155 [31,6,2,46,23] 81456.04997600002 -0.712858514567818 -3297.2011999999986 Manufacturer#5 almond aquamarine dodger light gainsboro 46 285.43749038756283 285.43749038756283 [6,2,46,23] 81474.56091875004 -0.984128787153391 -4871.028125000002 Manufacturer#5 almond azure blanched chiffon midnight 23 315.9225931564038 315.9225931564038 [2,46,23] 99807.08486666664 -0.9978877469246936 -5664.856666666666 -PREHOOK: query: -- 21. 
testDISTs -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, histogram_numeric(p_retailprice, 5) over w1 as hist, percentile(p_partkey, 0.5) over w1 as per, row_number() over(distribute by p_mfgr sort by p_mfgr, p_name) as rn @@ -942,8 +897,7 @@ window w1 as (distribute by p_mfgr sort by p_mfgr, p_name rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 21. testDISTs -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, histogram_numeric(p_retailprice, 5) over w1 as hist, percentile(p_partkey, 0.5) over w1 as per, row_number() over(distribute by p_mfgr sort by p_mfgr, p_name) as rn @@ -978,8 +932,7 @@ Manufacturer#5 almond antique medium spring khaki 6 [{"x":1018.1,"y":1.0},{"x":1 Manufacturer#5 almond antique sky peru orange 2 [{"x":1018.1,"y":1.0},{"x":1464.48,"y":1.0},{"x":1611.66,"y":1.0},{"x":1788.73,"y":1.0},{"x":1789.69,"y":1.0}] 78486.0 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 [{"x":1018.1,"y":1.0},{"x":1464.48,"y":1.0},{"x":1611.66,"y":1.0},{"x":1788.73,"y":1.0}] 60577.5 4 Manufacturer#5 almond azure blanched chiffon midnight 23 [{"x":1018.1,"y":1.0},{"x":1464.48,"y":1.0},{"x":1788.73,"y":1.0}] 78486.0 5 -PREHOOK: query: -- 22. testViewAsTableInputWithWindowing -create view IF NOT EXISTS mfgr_price_view as +PREHOOK: query: create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part @@ -988,8 +941,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part PREHOOK: Output: database:default PREHOOK: Output: default@mfgr_price_view -POSTHOOK: query: -- 22. 
testViewAsTableInputWithWindowing -create view IF NOT EXISTS mfgr_price_view as +POSTHOOK: query: create view IF NOT EXISTS mfgr_price_view as select p_mfgr, p_brand, round(sum(p_retailprice),2) as s from part @@ -1070,8 +1022,7 @@ Manufacturer#4 Brand#42 2581.68 7337.62 Manufacturer#5 Brand#51 1611.66 1611.66 Manufacturer#5 Brand#52 3254.17 4865.83 Manufacturer#5 Brand#53 2806.83 7672.66 -PREHOOK: query: -- 23. testCreateViewWithWindowingQuery -create view IF NOT EXISTS mfgr_brand_price_view as +PREHOOK: query: create view IF NOT EXISTS mfgr_brand_price_view as select p_mfgr, p_brand, sum(p_retailprice) over w1 as s from part @@ -1080,8 +1031,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@part PREHOOK: Output: database:default PREHOOK: Output: default@mfgr_brand_price_view -POSTHOOK: query: -- 23. testCreateViewWithWindowingQuery -create view IF NOT EXISTS mfgr_brand_price_view as +POSTHOOK: query: create view IF NOT EXISTS mfgr_brand_price_view as select p_mfgr, p_brand, sum(p_retailprice) over w1 as s from part @@ -1126,8 +1076,7 @@ Manufacturer#5 Brand#52 1789.69 Manufacturer#5 Brand#52 4271.3099999999995 Manufacturer#5 Brand#53 4418.49 Manufacturer#5 Brand#53 5190.08 -PREHOOK: query: -- 24. testLateralViews -select p_mfgr, p_name, +PREHOOK: query: select p_mfgr, p_name, lv_col, p_size, sum(p_size) over w1 as s from (select p_mfgr, p_name, p_size, array(1,2,3) arr from part) p lateral view explode(arr) part_lv as lv_col @@ -1135,8 +1084,7 @@ window w1 as (distribute by p_mfgr sort by p_size, lv_col rows between 2 precedi PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 24. 
testLateralViews -select p_mfgr, p_name, +POSTHOOK: query: select p_mfgr, p_name, lv_col, p_size, sum(p_size) over w1 as s from (select p_mfgr, p_name, p_size, array(1,2,3) arr from part) p lateral view explode(arr) part_lv as lv_col @@ -1222,8 +1170,7 @@ Manufacturer#5 almond aquamarine dodger light gainsboro 3 46 138 Manufacturer#5 almond azure blanched chiffon midnight 1 23 35 Manufacturer#5 almond azure blanched chiffon midnight 2 23 52 Manufacturer#5 almond azure blanched chiffon midnight 3 23 69 -PREHOOK: query: -- 25. testMultipleInserts3SWQs -CREATE TABLE part_1( +PREHOOK: query: CREATE TABLE part_1( p_mfgr STRING, p_name STRING, p_size INT, @@ -1233,8 +1180,7 @@ s DOUBLE) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@part_1 -POSTHOOK: query: -- 25. testMultipleInserts3SWQs -CREATE TABLE part_1( +POSTHOOK: query: CREATE TABLE part_1( p_mfgr STRING, p_name STRING, p_size INT, @@ -1460,8 +1406,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 2 31 Manufacturer#5 almond antique sky peru orange 2 3 3 31 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 4 6 Manufacturer#5 almond azure blanched chiffon midnight 23 5 5 2 -PREHOOK: query: -- 26. testGroupByHavingWithSWQAndAlias -select p_mfgr, p_name, p_size, min(p_retailprice) as mi, +PREHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice) as mi, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -1471,8 +1416,7 @@ having p_size > 0 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 26. 
testGroupByHavingWithSWQAndAlias -select p_mfgr, p_name, p_size, min(p_retailprice) as mi, +POSTHOOK: query: select p_mfgr, p_name, p_size, min(p_retailprice) as mi, rank() over(distribute by p_mfgr sort by p_name) as r, dense_rank() over(distribute by p_mfgr sort by p_name) as dr, p_size, p_size - lag(p_size,1,p_size) over(distribute by p_mfgr sort by p_name) as deltaSz @@ -1507,8 +1451,7 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 2 2 6 -25 Manufacturer#5 almond antique sky peru orange 2 1788.73 3 3 2 -4 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 4 4 46 44 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 5 5 23 -23 -PREHOOK: query: -- 27. testMultipleRangeWindows -select p_mfgr,p_name, p_size, +PREHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over (distribute by p_mfgr sort by p_size range between 10 preceding and current row) as s2, sum(p_size) over (distribute by p_mfgr sort by p_size range between current row and 10 following ) as s1 from part @@ -1516,8 +1459,7 @@ window w1 as (rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 27. testMultipleRangeWindows -select p_mfgr,p_name, p_size, +POSTHOOK: query: select p_mfgr,p_name, p_size, sum(p_size) over (distribute by p_mfgr sort by p_size range between 10 preceding and current row) as s2, sum(p_size) over (distribute by p_mfgr sort by p_size range between current row and 10 following ) as s1 from part @@ -1551,15 +1493,13 @@ Manufacturer#5 almond antique medium spring khaki 6 8 6 Manufacturer#5 almond antique sky peru orange 2 2 8 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 46 Manufacturer#5 almond azure blanched chiffon midnight 23 23 54 -PREHOOK: query: -- 28. 
testPartOrderInUDAFInvoke -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) as s from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 28. testPartOrderInUDAFInvoke -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) as s from part POSTHOOK: type: QUERY @@ -1591,16 +1531,14 @@ Manufacturer#5 almond antique medium spring khaki 6 85 Manufacturer#5 almond antique sky peru orange 2 108 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 Manufacturer#5 almond azure blanched chiffon midnight 23 71 -PREHOOK: query: -- 29. testPartOrderInWdwDef -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s from part window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 29. testPartOrderInWdwDef -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s from part window w1 as (partition by p_mfgr order by p_name rows between 2 preceding and 2 following) @@ -1633,8 +1571,7 @@ Manufacturer#5 almond antique medium spring khaki 6 85 Manufacturer#5 almond antique sky peru orange 2 108 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 Manufacturer#5 almond azure blanched chiffon midnight 23 71 -PREHOOK: query: -- 30. 
testDefaultPartitioningSpecRules -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s, sum(p_size) over w2 as s2 from part @@ -1643,8 +1580,7 @@ window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 30. testDefaultPartitioningSpecRules -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s, sum(p_size) over w2 as s2 from part @@ -1679,8 +1615,7 @@ Manufacturer#5 almond antique medium spring khaki 6 85 37 Manufacturer#5 almond antique sky peru orange 2 108 39 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 85 Manufacturer#5 almond azure blanched chiffon midnight 23 71 108 -PREHOOK: query: -- 31. testWindowCrossReference -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2 from part @@ -1689,8 +1624,7 @@ window w1 as (partition by p_mfgr order by p_name range between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 31. testWindowCrossReference -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2 from part @@ -1725,8 +1659,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6 6 Manufacturer#5 almond antique sky peru orange 2 2 2 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 46 Manufacturer#5 almond azure blanched chiffon midnight 23 23 23 -PREHOOK: query: -- 32. 
testWindowInheritance -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2 from part @@ -1735,8 +1668,7 @@ window w1 as (partition by p_mfgr order by p_name range between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 32. testWindowInheritance -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2 from part @@ -1771,8 +1703,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6 37 Manufacturer#5 almond antique sky peru orange 2 2 39 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 85 Manufacturer#5 almond azure blanched chiffon midnight 23 23 108 -PREHOOK: query: -- 33. testWindowForwardReference -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2, sum(p_size) over w3 as s3 @@ -1783,8 +1714,7 @@ window w1 as (distribute by p_mfgr sort by p_name range between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 33. testWindowForwardReference -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2, sum(p_size) over w3 as s3 @@ -1821,8 +1751,7 @@ Manufacturer#5 almond antique medium spring khaki 6 6 37 37 Manufacturer#5 almond antique sky peru orange 2 2 39 39 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 85 85 Manufacturer#5 almond azure blanched chiffon midnight 23 23 108 108 -PREHOOK: query: -- 34. 
testWindowDefinitionPropagation -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2, sum(p_size) over (w3 rows between 2 preceding and 2 following) as s3 @@ -1833,8 +1762,7 @@ window w1 as (distribute by p_mfgr sort by p_name range between 2 preceding and PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 34. testWindowDefinitionPropagation -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_size) over w1 as s1, sum(p_size) over w2 as s2, sum(p_size) over (w3 rows between 2 preceding and 2 following) as s3 @@ -1871,15 +1799,13 @@ Manufacturer#5 almond antique medium spring khaki 6 6 37 85 Manufacturer#5 almond antique sky peru orange 2 2 39 108 Manufacturer#5 almond aquamarine dodger light gainsboro 46 46 85 77 Manufacturer#5 almond azure blanched chiffon midnight 23 23 108 71 -PREHOOK: query: -- 35. testDistinctWithWindowing -explain +PREHOOK: query: explain select DISTINCT p_mfgr, p_name, p_size, sum(p_size) over w1 as s from part window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) PREHOOK: type: QUERY -POSTHOOK: query: -- 35. testDistinctWithWindowing -explain +POSTHOOK: query: explain select DISTINCT p_mfgr, p_name, p_size, sum(p_size) over w1 as s from part @@ -2009,15 +1935,13 @@ Manufacturer#5 almond antique medium spring khaki 6 85 Manufacturer#5 almond antique sky peru orange 2 108 Manufacturer#5 almond aquamarine dodger light gainsboro 46 77 Manufacturer#5 almond azure blanched chiffon midnight 23 71 -PREHOOK: query: -- 36. testRankWithPartitioning -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name ) as r from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 36. 
testRankWithPartitioning -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, rank() over (partition by p_mfgr order by p_name ) as r from part POSTHOOK: type: QUERY @@ -2049,8 +1973,7 @@ Manufacturer#5 almond antique medium spring khaki 6 2 Manufacturer#5 almond antique sky peru orange 2 3 Manufacturer#5 almond aquamarine dodger light gainsboro 46 4 Manufacturer#5 almond azure blanched chiffon midnight 23 5 -PREHOOK: query: -- 37. testPartitioningVariousForms -select p_mfgr, +PREHOOK: query: select p_mfgr, round(sum(p_retailprice) over (partition by p_mfgr order by p_mfgr),2) as s1, min(p_retailprice) over (partition by p_mfgr) as s2, max(p_retailprice) over (distribute by p_mfgr sort by p_mfgr) as s3, @@ -2060,8 +1983,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 37. testPartitioningVariousForms -select p_mfgr, +POSTHOOK: query: select p_mfgr, round(sum(p_retailprice) over (partition by p_mfgr order by p_mfgr),2) as s1, min(p_retailprice) over (partition by p_mfgr) as s2, max(p_retailprice) over (distribute by p_mfgr sort by p_mfgr) as s3, @@ -2097,8 +2019,7 @@ Manufacturer#5 7672.66 1018.1 1789.69 1534.53 5 Manufacturer#5 7672.66 1018.1 1789.69 1534.53 5 Manufacturer#5 7672.66 1018.1 1789.69 1534.53 5 Manufacturer#5 7672.66 1018.1 1789.69 1534.53 5 -PREHOOK: query: -- 38. testPartitioningVariousForms2 -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (partition by p_mfgr, p_name order by p_mfgr, p_name rows between unbounded preceding and current row) as s1, min(p_retailprice) over (distribute by p_mfgr, p_name sort by p_mfgr, p_name rows between unbounded preceding and current row) as s2, max(p_retailprice) over (partition by p_mfgr, p_name order by p_name) as s3 @@ -2106,8 +2027,7 @@ from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 38. 
testPartitioningVariousForms2 -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (partition by p_mfgr, p_name order by p_mfgr, p_name rows between unbounded preceding and current row) as s1, min(p_retailprice) over (distribute by p_mfgr, p_name sort by p_mfgr, p_name rows between unbounded preceding and current row) as s2, max(p_retailprice) over (partition by p_mfgr, p_name order by p_name) as s3 @@ -2141,15 +2061,13 @@ Manufacturer#5 almond antique medium spring khaki 6 1611.66 1611.66 1611.66 Manufacturer#5 almond antique sky peru orange 2 1788.73 1788.73 1788.73 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 1018.1 1018.1 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 1464.48 1464.48 -PREHOOK: query: -- 39. testUDFOnOrderCols -select p_mfgr, p_type, substr(p_type, 2) as short_ptype, +PREHOOK: query: select p_mfgr, p_type, substr(p_type, 2) as short_ptype, rank() over (partition by p_mfgr order by substr(p_type, 2)) as r from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 39. testUDFOnOrderCols -select p_mfgr, p_type, substr(p_type, 2) as short_ptype, +POSTHOOK: query: select p_mfgr, p_type, substr(p_type, 2) as short_ptype, rank() over (partition by p_mfgr order by substr(p_type, 2)) as r from part POSTHOOK: type: QUERY @@ -2181,15 +2099,13 @@ Manufacturer#5 LARGE BRUSHED BRASS ARGE BRUSHED BRASS 1 Manufacturer#5 MEDIUM BURNISHED TIN EDIUM BURNISHED TIN 3 Manufacturer#5 SMALL PLATED BRASS MALL PLATED BRASS 4 Manufacturer#5 STANDARD BURNISHED TIN TANDARD BURNISHED TIN 5 -PREHOOK: query: -- 40. testNoBetweenForRows -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows unbounded preceding) as s1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 40. 
testNoBetweenForRows -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows unbounded preceding) as s1 from part POSTHOOK: type: QUERY @@ -2221,15 +2137,13 @@ Manufacturer#5 almond antique medium spring khaki 6 3401.3500000000004 Manufacturer#5 almond antique sky peru orange 2 5190.08 Manufacturer#5 almond aquamarine dodger light gainsboro 46 6208.18 Manufacturer#5 almond azure blanched chiffon midnight 23 7672.66 -PREHOOK: query: -- 41. testNoBetweenForRange -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_size range unbounded preceding) as s1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 41. testNoBetweenForRange -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_size range unbounded preceding) as s1 from part POSTHOOK: type: QUERY @@ -2261,15 +2175,13 @@ Manufacturer#5 almond antique medium spring khaki 6 3400.3900000000003 Manufacturer#5 almond antique sky peru orange 2 1788.73 Manufacturer#5 almond aquamarine dodger light gainsboro 46 7672.660000000002 Manufacturer#5 almond azure blanched chiffon midnight 23 4864.870000000001 -PREHOOK: query: -- 42. testUnboundedFollowingForRows -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between current row and unbounded following) as s1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 42. 
testUnboundedFollowingForRows -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_name rows between current row and unbounded following) as s1 from part POSTHOOK: type: QUERY @@ -2301,15 +2213,13 @@ Manufacturer#5 almond antique medium spring khaki 6 5882.970000000001 Manufacturer#5 almond antique sky peru orange 2 4271.3099999999995 Manufacturer#5 almond aquamarine dodger light gainsboro 46 2482.58 Manufacturer#5 almond azure blanched chiffon midnight 23 1464.48 -PREHOOK: query: -- 43. testUnboundedFollowingForRange -select p_mfgr, p_name, p_size, +PREHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_size range between current row and unbounded following) as s1 from part PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 43. testUnboundedFollowingForRange -select p_mfgr, p_name, p_size, +POSTHOOK: query: select p_mfgr, p_name, p_size, sum(p_retailprice) over (distribute by p_mfgr sort by p_size range between current row and unbounded following) as s1 from part POSTHOOK: type: QUERY @@ -2341,16 +2251,14 @@ Manufacturer#5 almond antique medium spring khaki 6 5883.93 Manufacturer#5 almond antique sky peru orange 2 7672.660000000002 Manufacturer#5 almond aquamarine dodger light gainsboro 46 1018.1 Manufacturer#5 almond azure blanched chiffon midnight 23 4272.27 -PREHOOK: query: -- 44. testOverNoPartitionSingleAggregate -select p_name, p_retailprice, +PREHOOK: query: select p_name, p_retailprice, round(avg(p_retailprice) over(),2) from part order by p_name PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 44. 
testOverNoPartitionSingleAggregate -select p_name, p_retailprice, +POSTHOOK: query: select p_name, p_retailprice, round(avg(p_retailprice) over(),2) from part order by p_name @@ -2383,32 +2291,28 @@ almond aquamarine sandy cyan gainsboro 1701.6 1546.78 almond aquamarine yellow dodger mint 1844.92 1546.78 almond azure aquamarine papaya violet 1290.35 1546.78 almond azure blanched chiffon midnight 1464.48 1546.78 -PREHOOK: query: -- 45. empty partition test -select p_mfgr, +PREHOOK: query: select p_mfgr, sum(p_size) over (partition by p_mfgr order by p_size rows between unbounded preceding and current row) from part where p_mfgr = 'Manufacturer#6' PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 45. empty partition test -select p_mfgr, +POSTHOOK: query: select p_mfgr, sum(p_size) over (partition by p_mfgr order by p_size rows between unbounded preceding and current row) from part where p_mfgr = 'Manufacturer#6' POSTHOOK: type: QUERY POSTHOOK: Input: default@part #### A masked pattern was here #### -PREHOOK: query: -- 46. window sz is same as partition sz -select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), +PREHOOK: query: select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) from part where p_mfgr='Manufacturer#1' PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 46. 
window sz is same as partition sz -select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), +POSTHOOK: query: select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) from part where p_mfgr='Manufacturer#1' @@ -2421,14 +2325,12 @@ POSTHOOK: Input: default@part 1602.59 1549.8900000000003 4649.670000000001 1632.66 1632.6600000000008 1632.6600000000008 1753.76 1600.8575000000003 6403.430000000001 -PREHOOK: query: -- 47. empty partition -select sum(p_size) over (partition by p_mfgr ) +PREHOOK: query: select sum(p_size) over (partition by p_mfgr ) from part where p_mfgr = 'm1' PREHOOK: type: QUERY PREHOOK: Input: default@part #### A masked pattern was here #### -POSTHOOK: query: -- 47. empty partition -select sum(p_size) over (partition by p_mfgr ) +POSTHOOK: query: select sum(p_size) over (partition by p_mfgr ) from part where p_mfgr = 'm1' POSTHOOK: type: QUERY POSTHOOK: Input: default@part diff --git a/ql/src/test/results/clientpositive/specialChar.q.out b/ql/src/test/results/clientpositive/specialChar.q.out new file mode 100644 index 0000000..2646ad6 --- /dev/null +++ b/ql/src/test/results/clientpositive/specialChar.q.out @@ -0,0 +1,96 @@ +PREHOOK: query: CREATE TABLE ts(s varchar(550)) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ts +POSTHOOK: query: CREATE TABLE ts(s varchar(550)) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ts +PREHOOK: query: INSERT INTO ts VALUES ('Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3') +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ('Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like 
Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3') +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ("Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ("Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ("Mozilla/5.0 (iPhone\; \\; \\\;CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ("Mozilla/5.0 (iPhone\; \\; \\\;CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__3)values__tmp__table__3.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ("Mozilla/5.0 (iPhone\/; \/\/\/;CPU \;\;\;iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ("Mozilla/5.0 (iPhone\/; \/\/\/;CPU \;\;\;iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +POSTHOOK: type: QUERY +POSTHOOK: Output: 
default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__4)values__tmp__table__4.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ('\'') +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ('\'') +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__5)values__tmp__table__5.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ('\"') +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ('\"') +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__6)values__tmp__table__6.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ("Mozilla\"\'/5.0 \"(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ("Mozilla\"\'/5.0 \"(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__7)values__tmp__table__7.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ("Mozilla\'\"/5.0 \'(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ("Mozilla\'\"/5.0 \'(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION 
[(values__tmp__table__8)values__tmp__table__8.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ("Mozilla\\\"/5.0 ;;;;;;(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ("Mozilla\\\"/5.0 ;;;;;;(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__9)values__tmp__table__9.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: INSERT INTO ts VALUES ("Mozilla\'\\/5.0 ;;;\";;\";(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +PREHOOK: type: QUERY +PREHOOK: Output: default@ts +POSTHOOK: query: INSERT INTO ts VALUES ("Mozilla\'\\/5.0 ;;;\";;\";(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3") +POSTHOOK: type: QUERY +POSTHOOK: Output: default@ts +POSTHOOK: Lineage: ts.s EXPRESSION [(values__tmp__table__10)values__tmp__table__10.FieldSchema(name:tmp_values_col1, type:string, comment:), ] +PREHOOK: query: select * from ts +PREHOOK: type: QUERY +PREHOOK: Input: default@ts +#### A masked pattern was here #### +POSTHOOK: query: select * from ts +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ts +#### A masked pattern was here #### +Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3 +Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3 +Mozilla/5.0 (iPhone; \; \;CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 
Mobile/9A334 Safari/7534.48.3 +Mozilla/5.0 (iPhone/; ///;CPU ;;;iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3 +' +" +Mozilla"'/5.0 "(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3 +Mozilla'"/5.0 '(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3 +Mozilla\"/5.0 ;;;;;;(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3 +Mozilla'\/5.0 ;;;";;";(iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3 diff --git a/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out b/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out index 7baec88..8cb04c7 100644 --- a/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out +++ b/ql/src/test/results/clientpositive/special_character_in_tabnames_2.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- try the query without indexing, with manual indexing, and with automatic indexing --- SORT_QUERY_RESULTS - -DROP TABLE IF EXISTS `s/c` +PREHOOK: query: DROP TABLE IF EXISTS `s/c` PREHOOK: type: DROPTABLE -POSTHOOK: query: -- try the query without indexing, with manual indexing, and with automatic indexing --- SORT_QUERY_RESULTS - -DROP TABLE IF EXISTS `s/c` +POSTHOOK: query: DROP TABLE IF EXISTS `s/c` POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE `s/c` (key STRING COMMENT 'default', value STRING COMMENT 'default') STORED AS TEXTFILE PREHOOK: type: CREATETABLE @@ -40,13 +34,11 @@ POSTHOOK: query: ANALYZE TABLE `s/c` COMPUTE STATISTICS FOR COLUMNS key,value POSTHOOK: type: QUERY POSTHOOK: Input: default@s/c #### A masked pattern was here #### -PREHOOK: query: -- without indexing -SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100 +PREHOOK: 
query: SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100 PREHOOK: type: QUERY PREHOOK: Input: default@s/c #### A masked pattern was here #### -POSTHOOK: query: -- without indexing -SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100 +POSTHOOK: query: SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100 POSTHOOK: type: QUERY POSTHOOK: Input: default@s/c #### A masked pattern was here #### @@ -87,13 +79,10 @@ POSTHOOK: Output: default@default__s/c_src_index__ POSTHOOK: Lineage: default__s/c_src_index__._bucketname SIMPLE [(s/c)s/c.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] POSTHOOK: Lineage: default__s/c_src_index__._offsets EXPRESSION [(s/c)s/c.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ] POSTHOOK: Lineage: default__s/c_src_index__.key SIMPLE [(s/c)s/c.FieldSchema(name:key, type:string, comment:default), ] -PREHOOK: query: -- manual indexing #### A masked pattern was here #### PREHOOK: type: QUERY PREHOOK: Input: default@default__s/c_src_index__ #### A masked pattern was here #### -POSTHOOK: query: -- manual indexing -#### A masked pattern was here #### POSTHOOK: type: QUERY POSTHOOK: Input: default@default__s/c_src_index__ #### A masked pattern was here #### @@ -160,11 +149,9 @@ POSTHOOK: Input: default@s/c 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- automatic indexing -EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100 +PREHOOK: query: EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- automatic indexing -EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100 +POSTHOOK: query: EXPLAIN SELECT key, value FROM `s/c` WHERE key > 80 AND key < 100 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-3 is a root stage diff --git a/ql/src/test/results/clientpositive/stats1.q.out b/ql/src/test/results/clientpositive/stats1.q.out index ac076ec..2d5b4f8 100644 --- a/ql/src/test/results/clientpositive/stats1.q.out +++ 
b/ql/src/test/results/clientpositive/stats1.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable @@ -199,17 +195,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Load a file into a existing table --- Some stats (numFiles, totalSize) should be updated correctly --- Some other stats (numRows, rawDataSize) should be cleared -load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable +PREHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable PREHOOK: type: LOAD #### A masked pattern was here #### PREHOOK: Output: default@tmptable -POSTHOOK: query: -- Load a file into a existing table --- Some stats (numFiles, totalSize) should be updated correctly --- Some other stats (numRows, rawDataSize) should be cleared -load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable +POSTHOOK: query: load data local inpath '../../data/files/srcbucket20.txt' INTO TABLE tmptable POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/stats18.q.out b/ql/src/test/results/clientpositive/stats18.q.out index 3ad9679..4945808 100644 --- a/ql/src/test/results/clientpositive/stats18.q.out +++ b/ql/src/test/results/clientpositive/stats18.q.out @@ -16,16 +16,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@stats_part@ds=2010-04-08/hr=13 POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=13).key SIMPLE 
[(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: stats_part PARTITION(ds=2010-04-08,hr=13).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Load a file into a existing partition --- Some stats (numFiles, totalSize) should be updated correctly --- Some other stats (numRows, rawDataSize) should be cleared -desc formatted stats_part partition (ds='2010-04-08', hr='13') +PREHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr='13') PREHOOK: type: DESCTABLE PREHOOK: Input: default@stats_part -POSTHOOK: query: -- Load a file into a existing partition --- Some stats (numFiles, totalSize) should be updated correctly --- Some other stats (numRows, rawDataSize) should be cleared -desc formatted stats_part partition (ds='2010-04-08', hr='13') +POSTHOOK: query: desc formatted stats_part partition (ds='2010-04-08', hr='13') POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@stats_part # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/stats4.q.out b/ql/src/test/results/clientpositive/stats4.q.out index 2ffbba9..6105017 100644 --- a/ql/src/test/results/clientpositive/stats4.q.out +++ b/ql/src/test/results/clientpositive/stats4.q.out @@ -1,11 +1,7 @@ -PREHOOK: query: -- SORT_AND_HASH_QUERY_RESULTS - -show partitions srcpart +PREHOOK: query: show partitions srcpart PREHOOK: type: SHOWPARTITIONS PREHOOK: Input: default@srcpart -POSTHOOK: query: -- SORT_AND_HASH_QUERY_RESULTS - -show partitions srcpart +POSTHOOK: query: show partitions srcpart POSTHOOK: type: SHOWPARTITIONS POSTHOOK: Input: default@srcpart ds=2008-04-08/hr=11 diff --git a/ql/src/test/results/clientpositive/stats_aggregator_error_1.q.out b/ql/src/test/results/clientpositive/stats_aggregator_error_1.q.out index 4c34ebf..fec20f0 100644 --- a/ql/src/test/results/clientpositive/stats_aggregator_error_1.q.out +++ b/ql/src/test/results/clientpositive/stats_aggregator_error_1.q.out @@ -1,20 +1,8 
@@ -PREHOOK: query: -- In this test, there is a dummy stats aggregator which throws an error when various --- methods are called (as indicated by the parameter hive.test.dummystats.aggregator) --- Since stats need not be reliable (by setting hive.stats.reliable to false), the --- insert statements succeed. The insert statement succeeds even if the stats aggregator --- is set to null, since stats need not be reliable. - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- In this test, there is a dummy stats aggregator which throws an error when various --- methods are called (as indicated by the parameter hive.test.dummystats.aggregator) --- Since stats need not be reliable (by setting hive.stats.reliable to false), the --- insert statements succeed. The insert statement succeeds even if the stats aggregator --- is set to null, since stats need not be reliable. 
- -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out b/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out index 982baab..b855b38 100644 --- a/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out +++ b/ql/src/test/results/clientpositive/stats_empty_dyn_part.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- This test verifies writing a query using dynamic partitions --- which results in no partitions actually being created with --- hive.stats.reliable set to true - -create table tmptable(key string) partitioned by (part string) +PREHOOK: query: create table tmptable(key string) partitioned by (part string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- This test verifies writing a query using dynamic partitions --- which results in no partitions actually being created with --- hive.stats.reliable set to true - -create table tmptable(key string) partitioned by (part string) +POSTHOOK: query: create table tmptable(key string) partitioned by (part string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/stats_empty_partition.q.out b/ql/src/test/results/clientpositive/stats_empty_partition.q.out index d7f2e73..289d17a 100644 --- a/ql/src/test/results/clientpositive/stats_empty_partition.q.out +++ b/ql/src/test/results/clientpositive/stats_empty_partition.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- This test verifies that writing an empty partition succeeds when --- hive.stats.reliable is set to true. 
- -create table tmptable(key string, value string) partitioned by (part string) +PREHOOK: query: create table tmptable(key string, value string) partitioned by (part string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- This test verifies that writing an empty partition succeeds when --- hive.stats.reliable is set to true. - -create table tmptable(key string, value string) partitioned by (part string) +POSTHOOK: query: create table tmptable(key string, value string) partitioned by (part string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.out b/ql/src/test/results/clientpositive/stats_list_bucket.q.out index f4b27dd..0c43b1b 100644 --- a/ql/src/test/results/clientpositive/stats_list_bucket.q.out +++ b/ql/src/test/results/clientpositive/stats_list_bucket.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - -drop table stats_list_bucket +PREHOOK: query: drop table stats_list_bucket PREHOOK: type: DROPTABLE -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - -drop table stats_list_bucket +POSTHOOK: query: drop table stats_list_bucket POSTHOOK: type: DROPTABLE PREHOOK: query: drop table stats_list_bucket_1 PREHOOK: type: DROPTABLE @@ -30,18 +26,12 @@ stored as rcfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@stats_list_bucket -PREHOOK: query: -- Try partitioned table with list bucketing. --- The stats should show 500 rows loaded, as many rows as the src table has. 
- -insert overwrite table stats_list_bucket partition (ds = '2008-04-08', hr = '11') +PREHOOK: query: insert overwrite table stats_list_bucket partition (ds = '2008-04-08', hr = '11') select key, value from src PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@stats_list_bucket@ds=2008-04-08/hr=11 -POSTHOOK: query: -- Try partitioned table with list bucketing. --- The stats should show 500 rows loaded, as many rows as the src table has. - -insert overwrite table stats_list_bucket partition (ds = '2008-04-08', hr = '11') +POSTHOOK: query: insert overwrite table stats_list_bucket partition (ds = '2008-04-08', hr = '11') select key, value from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src @@ -93,10 +83,7 @@ Skewed Values: [[287, val_287], [466, val_466], [82, val_82]] Skewed Value to Truncated Path: {[287, val_287]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=287/c2=val_287, [466, val_466]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=466/c2=val_466, [82, val_82]=/stats_list_bucket/ds=2008-04-08/hr=11/c1=82/c2=val_82} Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Also try non-partitioned table with list bucketing. --- Stats should show the same number of rows. - -create table stats_list_bucket_1 ( +PREHOOK: query: create table stats_list_bucket_1 ( c1 string, c2 string ) @@ -106,10 +93,7 @@ stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@stats_list_bucket_1 -POSTHOOK: query: -- Also try non-partitioned table with list bucketing. --- Stats should show the same number of rows. 
- -create table stats_list_bucket_1 ( +POSTHOOK: query: create table stats_list_bucket_1 ( c1 string, c2 string ) diff --git a/ql/src/test/results/clientpositive/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/stats_noscan_1.q.out index 7382b31..9e1652b 100644 --- a/ql/src/test/results/clientpositive/stats_noscan_1.q.out +++ b/ql/src/test/results/clientpositive/stats_noscan_1.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- test analyze table ... compute statistics noscan - --- 1. test full spec -create table analyze_srcpart like srcpart +PREHOOK: query: create table analyze_srcpart like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@analyze_srcpart -POSTHOOK: query: -- test analyze table ... compute statistics noscan - --- 1. test full spec -create table analyze_srcpart like srcpart +POSTHOOK: query: create table analyze_srcpart like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@analyze_srcpart @@ -76,12 +70,10 @@ POSTHOOK: Input: default@analyze_srcpart POSTHOOK: Input: default@analyze_srcpart@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart@ds=2008-04-08/hr=12 -PREHOOK: query: -- confirm result -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +PREHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE PREHOOK: Input: default@analyze_srcpart -POSTHOOK: query: -- confirm result -describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: query: describe formatted analyze_srcpart PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@analyze_srcpart # col_name data_type comment @@ -272,13 +264,11 @@ POSTHOOK: query: drop table analyze_srcpart POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@analyze_srcpart POSTHOOK: Output: default@analyze_srcpart -PREHOOK: query: -- 2. 
test partial spec -create table analyze_srcpart_partial like srcpart +PREHOOK: query: create table analyze_srcpart_partial like srcpart PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@analyze_srcpart_partial -POSTHOOK: query: -- 2. test partial spec -create table analyze_srcpart_partial like srcpart +POSTHOOK: query: create table analyze_srcpart_partial like srcpart POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@analyze_srcpart_partial @@ -338,12 +328,10 @@ POSTHOOK: Input: default@analyze_srcpart_partial@ds=2008-04-08/hr=12 POSTHOOK: Output: default@analyze_srcpart_partial POSTHOOK: Output: default@analyze_srcpart_partial@ds=2008-04-08/hr=11 POSTHOOK: Output: default@analyze_srcpart_partial@ds=2008-04-08/hr=12 -PREHOOK: query: -- confirm result -describe formatted analyze_srcpart_partial PARTITION(ds='2008-04-08',hr=11) +PREHOOK: query: describe formatted analyze_srcpart_partial PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE PREHOOK: Input: default@analyze_srcpart_partial -POSTHOOK: query: -- confirm result -describe formatted analyze_srcpart_partial PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: query: describe formatted analyze_srcpart_partial PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@analyze_srcpart_partial # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/stats_noscan_2.q.out b/ql/src/test/results/clientpositive/stats_noscan_2.q.out index 40b2a66..1e9603e 100644 --- a/ql/src/test/results/clientpositive/stats_noscan_2.q.out +++ b/ql/src/test/results/clientpositive/stats_noscan_2.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- test analyze table compute statistiscs [noscan] on external table --- 1 test table #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@anaylyze_external -POSTHOOK: query: -- test 
analyze table compute statistiscs [noscan] on external table --- 1 test table #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### @@ -118,15 +114,11 @@ POSTHOOK: query: drop table anaylyze_external POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@anaylyze_external POSTHOOK: Output: default@anaylyze_external -PREHOOK: query: -- 2 test partition --- prepare data -create table texternal(key string, val string) partitioned by (insertdate string) +PREHOOK: query: create table texternal(key string, val string) partitioned by (insertdate string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@texternal -POSTHOOK: query: -- 2 test partition --- prepare data -create table texternal(key string, val string) partitioned by (insertdate string) +POSTHOOK: query: create table texternal(key string, val string) partitioned by (insertdate string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@texternal @@ -158,13 +150,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@texternal #### A masked pattern was here #### 500 -PREHOOK: query: -- create external table #### A masked pattern was here #### PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@anaylyze_external -POSTHOOK: query: -- create external table #### A masked pattern was here #### POSTHOOK: type: CREATETABLE #### A masked pattern was here #### @@ -190,15 +180,13 @@ POSTHOOK: Input: default@anaylyze_external POSTHOOK: Input: default@anaylyze_external@insertdate=2008-01-01 #### A masked pattern was here #### 500 -PREHOOK: query: -- analyze -analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics noscan +PREHOOK: query: analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics noscan PREHOOK: type: QUERY PREHOOK: Input: default@anaylyze_external PREHOOK: Input: 
default@anaylyze_external@insertdate=2008-01-01 PREHOOK: Output: default@anaylyze_external PREHOOK: Output: default@anaylyze_external@insertdate=2008-01-01 -POSTHOOK: query: -- analyze -analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics noscan +POSTHOOK: query: analyze table anaylyze_external PARTITION (insertdate='2008-01-01') compute statistics noscan POSTHOOK: type: QUERY POSTHOOK: Input: default@anaylyze_external POSTHOOK: Input: default@anaylyze_external@insertdate=2008-01-01 diff --git a/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out b/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out index ab2baaa..cf9867d 100644 --- a/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out +++ b/ql/src/test/results/clientpositive/stats_partscan_1_23.q.out @@ -1,26 +1,10 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- This test uses mapred.max.split.size/mapred.max.split.size for controlling --- number of input splits. --- stats_partscan_1.q is the same test with this but has different result. - --- test analyze table ... compute statistics partialscan - --- 1. prepare data -CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) +PREHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) partitioned by (ds string, hr string) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@analyze_srcpart_partial_scan -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- This test uses mapred.max.split.size/mapred.max.split.size for controlling --- number of input splits. --- stats_partscan_1.q is the same test with this but has different result. - --- test analyze table ... compute statistics partialscan - --- 1. 
prepare data -CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) +POSTHOOK: query: CREATE table analyze_srcpart_partial_scan (key STRING, value STRING) partitioned by (ds string, hr string) stored as rcfile POSTHOOK: type: CREATETABLE @@ -90,12 +74,10 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- 2. partialscan -explain +PREHOOK: query: explain analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan PREHOOK: type: QUERY -POSTHOOK: query: -- 2. partialscan -explain +POSTHOOK: query: explain analyze table analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) compute statistics partialscan POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -121,12 +103,10 @@ POSTHOOK: Input: default@analyze_srcpart_partial_scan POSTHOOK: Input: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11 POSTHOOK: Output: default@analyze_srcpart_partial_scan POSTHOOK: Output: default@analyze_srcpart_partial_scan@ds=2008-04-08/hr=11 -PREHOOK: query: -- 3. confirm result -describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) +PREHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) PREHOOK: type: DESCTABLE PREHOOK: Input: default@analyze_srcpart_partial_scan -POSTHOOK: query: -- 3. 
confirm result -describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) +POSTHOOK: query: describe formatted analyze_srcpart_partial_scan PARTITION(ds='2008-04-08',hr=11) POSTHOOK: type: DESCTABLE POSTHOOK: Input: default@analyze_srcpart_partial_scan # col_name data_type comment diff --git a/ql/src/test/results/clientpositive/stats_publisher_error_1.q.out b/ql/src/test/results/clientpositive/stats_publisher_error_1.q.out index 01632f4..6fe20fd 100644 --- a/ql/src/test/results/clientpositive/stats_publisher_error_1.q.out +++ b/ql/src/test/results/clientpositive/stats_publisher_error_1.q.out @@ -1,20 +1,8 @@ -PREHOOK: query: -- In this test, there is a dummy stats publisher which throws an error when various --- methods are called (as indicated by the parameter hive.test.dummystats.publisher) --- Since stats need not be reliable (by setting hive.stats.reliable to false), the --- insert statements succeed. The insert statement succeeds even if the stats publisher --- is set to null, since stats need not be reliable. - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- In this test, there is a dummy stats publisher which throws an error when various --- methods are called (as indicated by the parameter hive.test.dummystats.publisher) --- Since stats need not be reliable (by setting hive.stats.reliable to false), the --- insert statements succeed. The insert statement succeeds even if the stats publisher --- is set to null, since stats need not be reliable. 
- -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/statsfs.q.out b/ql/src/test/results/clientpositive/statsfs.q.out index 9d63bf7..d070e9a 100644 --- a/ql/src/test/results/clientpositive/statsfs.q.out +++ b/ql/src/test/results/clientpositive/statsfs.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- stats computation on partitioned table with analyze command - -create table t1 (key string, value string) partitioned by (ds string) +PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- stats computation on partitioned table with analyze command - -create table t1 (key string, value string) partitioned by (ds string) +POSTHOOK: query: create table t1 (key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -130,15 +126,11 @@ POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 -PREHOOK: query: -- stats computation on partitioned table with autogather on insert query - -create table t1 (key string, value string) partitioned by (ds string) +PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- stats computation on partitioned table with autogather on insert query - -create table t1 (key string, value string) partitioned by (ds string) +POSTHOOK: query: create table t1 (key string, value string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -248,15 +240,11 @@ 
POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 -PREHOOK: query: -- analyze stmt on unpartitioned table - -create table t1 (key string, value string) +PREHOOK: query: create table t1 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- analyze stmt on unpartitioned table - -create table t1 (key string, value string) +POSTHOOK: query: create table t1 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -319,15 +307,11 @@ POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 -PREHOOK: query: -- stats computation on unpartitioned table with autogather on insert query - -create table t1 (key string, value string) +PREHOOK: query: create table t1 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- stats computation on unpartitioned table with autogather on insert query - -create table t1 (key string, value string) +POSTHOOK: query: create table t1 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -384,17 +368,11 @@ POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@t1 POSTHOOK: Output: default@t1 -PREHOOK: query: -- stats computation on partitioned table with autogather on insert query with dynamic partitioning - - -create table t1 (key string, value string) partitioned by (ds string, hr string) +PREHOOK: query: create table t1 (key string, value string) partitioned by (ds string, hr string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- stats computation on partitioned table with autogather on insert query with dynamic partitioning - - -create table 
t1 (key string, value string) partitioned by (ds string, hr string) +POSTHOOK: query: create table t1 (key string, value string) partitioned by (ds string, hr string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/subquery_exists.q.out b/ql/src/test/results/clientpositive/subquery_exists.q.out index 1019e7a..4e9f5cb 100644 --- a/ql/src/test/results/clientpositive/subquery_exists.q.out +++ b/ql/src/test/results/clientpositive/subquery_exists.q.out @@ -1,8 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- no agg, corr --- SORT_QUERY_RESULTS -explain +PREHOOK: query: explain select * from src b where exists @@ -11,11 +7,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- no agg, corr --- SORT_QUERY_RESULTS -explain +POSTHOOK: query: explain select * from src b where exists @@ -207,8 +199,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- view test -create view cv1 as +PREHOOK: query: create view cv1 as select * from src b where exists @@ -219,8 +210,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@cv1 -POSTHOOK: query: -- view test -create view cv1 as +POSTHOOK: query: create view cv1 as select * from src b where exists @@ -252,8 +242,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- sq in from -select * +PREHOOK: query: select * from (select * from src b where exists @@ -264,8 +253,7 @@ from (select * PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- sq in from -select * +POSTHOOK: query: select * from (select * from src b where exists @@ -287,8 +275,7 @@ POSTHOOK: Input: default@src 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- upper case in subq -explain +PREHOOK: query: explain 
select * from src b where exists @@ -297,8 +284,7 @@ where exists where b.VALUE = a.VALUE ) PREHOOK: type: QUERY -POSTHOOK: query: -- upper case in subq -explain +POSTHOOK: query: explain select * from src b where exists diff --git a/ql/src/test/results/clientpositive/subquery_exists_explain_rewrite.q.out b/ql/src/test/results/clientpositive/subquery_exists_explain_rewrite.q.out index fd8dbc5..6bb95c8 100644 --- a/ql/src/test/results/clientpositive/subquery_exists_explain_rewrite.q.out +++ b/ql/src/test/results/clientpositive/subquery_exists_explain_rewrite.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- no agg, corr -explain rewrite +PREHOOK: query: explain rewrite select * from src b where exists @@ -8,8 +7,7 @@ where exists where b.value = a.value and a.key = b.key and a.value > 'val_9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- no agg, corr -explain rewrite +POSTHOOK: query: explain rewrite select * from src b where exists @@ -35,8 +33,7 @@ from src b left semi join (select a.key, a.value as sq_corr_0, a.key as sq_corr where a.value > 'val_9' ) sq_1 on b.value = sq_1.sq_corr_0 and sq_1.sq_corr_1 = b.key where 1 = 1 -PREHOOK: query: -- sq in from -explain rewrite +PREHOOK: query: explain rewrite select * from (select * from src b @@ -46,8 +43,7 @@ from (select * where b.value = a.value and a.key = b.key and a.value > 'val_9') ) a PREHOOK: type: QUERY -POSTHOOK: query: -- sq in from -explain rewrite +POSTHOOK: query: explain rewrite select * from (select * from src b diff --git a/ql/src/test/results/clientpositive/subquery_exists_having.q.out b/ql/src/test/results/clientpositive/subquery_exists_having.q.out index e54e18f..a369c69 100644 --- a/ql/src/test/results/clientpositive/subquery_exists_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_exists_having.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- no agg, corr -explain +PREHOOK: query: explain select b.key, count(*) from src b group by b.key @@ -9,8 +8,7 @@ having exists where a.key = b.key and a.value > 
'val_9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- no agg, corr -explain +POSTHOOK: query: explain select b.key, count(*) from src b group by b.key @@ -237,8 +235,7 @@ POSTHOOK: Input: default@src 96 1 97 2 98 2 -PREHOOK: query: -- no agg, corr -explain +PREHOOK: query: explain select b.key, count(*) from src b group by b.key @@ -248,8 +245,7 @@ having exists where a.key = b.key and a.value > 'val_9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- no agg, corr -explain +POSTHOOK: query: explain select b.key, count(*) from src b group by b.key @@ -457,8 +453,7 @@ POSTHOOK: Input: default@src 96 1 97 2 98 2 -PREHOOK: query: -- view test -create view cv1 as +PREHOOK: query: create view cv1 as select b.key, count(*) as c from src b group by b.key @@ -471,8 +466,7 @@ PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@cv1 -POSTHOOK: query: -- view test -create view cv1 as +POSTHOOK: query: create view cv1 as select b.key, count(*) as c from src b group by b.key @@ -501,8 +495,7 @@ POSTHOOK: Input: default@src 96 1 97 2 98 2 -PREHOOK: query: -- sq in from -select * +PREHOOK: query: select * from (select b.key, count(*) from src b group by b.key @@ -515,8 +508,7 @@ from (select b.key, count(*) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- sq in from -select * +POSTHOOK: query: select * from (select b.key, count(*) from src b group by b.key @@ -535,8 +527,7 @@ POSTHOOK: Input: default@src 96 1 97 2 98 2 -PREHOOK: query: -- join on agg -select b.key, min(b.value) +PREHOOK: query: select b.key, min(b.value) from src b group by b.key having exists ( select a.key @@ -546,8 +537,7 @@ having exists ( select a.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- join on agg -select b.key, min(b.value) +POSTHOOK: query: select b.key, min(b.value) from src b group by b.key having exists ( select a.key diff 
--git a/ql/src/test/results/clientpositive/subquery_in_explain_rewrite.q.out b/ql/src/test/results/clientpositive/subquery_in_explain_rewrite.q.out index 5a27c3d..f87ffff 100644 --- a/ql/src/test/results/clientpositive/subquery_in_explain_rewrite.q.out +++ b/ql/src/test/results/clientpositive/subquery_in_explain_rewrite.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- non agg, non corr -explain rewrite +PREHOOK: query: explain rewrite select * from src where src.key in (select key from src s1 where s1.key > '9') PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr -explain rewrite +POSTHOOK: query: explain rewrite select * from src where src.key in (select key from src s1 where s1.key > '9') @@ -21,8 +19,7 @@ Rewritten Query: select * from src left semi join (select key from src s1 where s1.key > '9') sq_1 on src.key = sq_1.key where 1 = 1 -PREHOOK: query: -- non agg, corr -explain rewrite +PREHOOK: query: explain rewrite select * from src b where b.key in @@ -31,8 +28,7 @@ where b.key in where b.value = a.value and a.key > '9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr -explain rewrite +POSTHOOK: query: explain rewrite select * from src b where b.key in @@ -58,8 +54,7 @@ from src b left semi join (select a.key, a.value as sq_corr_0 where a.key > '9' ) sq_1 on b.value = sq_1.sq_corr_0 and b.key = sq_1.key where 1 = 1 -PREHOOK: query: -- agg, non corr -explain rewrite +PREHOOK: query: explain rewrite select p_name, p_size from part where part.p_size in @@ -68,8 +63,7 @@ part where part.p_size in where r <= 2 ) PREHOOK: type: QUERY -POSTHOOK: query: -- agg, non corr -explain rewrite +POSTHOOK: query: explain rewrite select p_name, p_size from part where part.p_size in @@ -95,8 +89,7 @@ part left semi join (select avg(p_size) from (select p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a where r <= 2 ) sq_1 on part.p_size = sq_1._c0 where 1 = 1 -PREHOOK: query: -- agg, corr -explain rewrite +PREHOOK: query: explain rewrite 
select p_mfgr, p_name, p_size from part b where b.p_size in (select min(p_size) @@ -104,8 +97,7 @@ from part b where b.p_size in where r <= 2 and b.p_mfgr = a.p_mfgr ) PREHOOK: type: QUERY -POSTHOOK: query: -- agg, corr -explain rewrite +POSTHOOK: query: explain rewrite select p_mfgr, p_name, p_size from part b where b.p_size in (select min(p_size) @@ -129,8 +121,7 @@ from part b left semi join (select min(p_size), a.p_mfgr as sq_corr_0 from (select p_mfgr, p_size, rank() over(partition by p_mfgr order by p_size) as r from part) a where r <= 2 group by a.p_mfgr) sq_1 on b.p_mfgr = sq_1.sq_corr_0 and b.p_size = sq_1._c0 where 1 = 1 -PREHOOK: query: -- distinct, corr -explain rewrite +PREHOOK: query: explain rewrite select * from src b where b.key in @@ -139,8 +130,7 @@ where b.key in where b.value = a.value and a.key > '9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- distinct, corr -explain rewrite +POSTHOOK: query: explain rewrite select * from src b where b.key in @@ -166,15 +156,13 @@ from src b left semi join (select distinct a.key, a.value as sq_corr_0 where a.key > '9' ) sq_1 on b.value = sq_1.sq_corr_0 and b.key = sq_1.key where 1 = 1 -PREHOOK: query: -- non agg, non corr, windowing -explain rewrite +PREHOOK: query: explain rewrite select p_mfgr, p_name, p_size from part where part.p_size in (select first_value(p_size) over(partition by p_mfgr order by p_size) from part) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr, windowing -explain rewrite +POSTHOOK: query: explain rewrite select p_mfgr, p_name, p_size from part where part.p_size in @@ -191,15 +179,13 @@ Rewritten Query: select p_mfgr, p_name, p_size from part left semi join (select first_value(p_size) over(partition by p_mfgr order by p_size) from part) sq_1 on part.p_size = sq_1.first_value_window_0 where 1 = 1 -PREHOOK: query: -- non agg, non corr, with join in Parent Query -explain rewrite +PREHOOK: query: explain rewrite select p.p_partkey, li.l_suppkey from (select distinct l_partkey as 
p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr, with join in Parent Query -explain rewrite +POSTHOOK: query: explain rewrite select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and @@ -217,15 +203,13 @@ select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey left semi join (select l_orderkey from lineitem where l_shipmode = 'AIR') sq_1 on li.l_orderkey = sq_1.l_orderkey where li.l_linenumber = 1 and 1 = 1 -PREHOOK: query: -- non agg, corr, with join in Parent Query -explain rewrite +PREHOOK: query: explain rewrite select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr, with join in Parent Query -explain rewrite +POSTHOOK: query: explain rewrite select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and diff --git a/ql/src/test/results/clientpositive/subquery_in_having.q.out b/ql/src/test/results/clientpositive/subquery_in_having.q.out index e277c59..8143605 100644 --- a/ql/src/test/results/clientpositive/subquery_in_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_in_having.q.out @@ -1,12 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- data setup -DROP TABLE IF EXISTS part_subq +PREHOOK: query: DROP TABLE IF EXISTS part_subq PREHOOK: type: DROPTABLE -POSTHOOK: query: -- 
SORT_QUERY_RESULTS - --- data setup -DROP TABLE IF EXISTS part_subq +POSTHOOK: query: DROP TABLE IF EXISTS part_subq POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE part_subq( p_partkey INT, @@ -44,15 +38,13 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/part_tiny.txt' overwri POSTHOOK: type: LOAD #### A masked pattern was here #### POSTHOOK: Output: default@part_subq -PREHOOK: query: -- non agg, non corr -explain +PREHOOK: query: explain select key, count(*) from src group by key having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key ) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr -explain +POSTHOOK: query: explain select key, count(*) from src group by key @@ -266,15 +258,13 @@ POSTHOOK: Input: default@src 5 3 70 3 90 3 -PREHOOK: query: -- non agg, corr -explain +PREHOOK: query: explain select key, value, count(*) from src b group by key, value having count(*) in (select count(*) from src s1 where s1.key > '9' and s1.value = b.value group by s1.key ) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr -explain +POSTHOOK: query: explain select key, value, count(*) from src b group by key, value @@ -521,8 +511,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- agg, non corr -explain +PREHOOK: query: explain select p_mfgr, avg(p_size) from part_subq b group by b.p_mfgr @@ -533,8 +522,7 @@ having b.p_mfgr in having max(p_size) - min(p_size) < 20 ) PREHOOK: type: QUERY -POSTHOOK: query: -- agg, non corr -explain +POSTHOOK: query: explain select p_mfgr, avg(p_size) from part_subq b group by b.p_mfgr @@ -675,8 +663,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- agg, non corr -explain +PREHOOK: query: explain select p_mfgr, avg(p_size) from part_subq b group by b.p_mfgr @@ -687,8 +674,7 @@ having b.p_mfgr in having max(p_size) - min(p_size) < 20 ) PREHOOK: type: QUERY -POSTHOOK: query: -- agg, non corr -explain +POSTHOOK: query: explain select p_mfgr, avg(p_size) from part_subq 
b group by b.p_mfgr @@ -813,8 +799,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- join on agg -select b.key, min(b.value) +PREHOOK: query: select b.key, min(b.value) from src b group by b.key having b.key in ( select a.key @@ -824,8 +809,7 @@ having b.key in ( select a.key PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- join on agg -select b.key, min(b.value) +POSTHOOK: query: select b.key, min(b.value) from src b group by b.key having b.key in ( select a.key @@ -841,26 +825,14 @@ POSTHOOK: Input: default@src 96 val_96 97 val_97 98 val_98 -PREHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -explain +PREHOOK: query: explain select key, value, count(*) from src b where b.key in (select key from src where src.key > '8') group by key, value having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key ) PREHOOK: type: QUERY -POSTHOOK: query: -- where and having --- Plan is: --- Stage 1: b semijoin sq1:src (subquery in where) --- Stage 2: group by Stage 1 o/p --- Stage 5: group by on sq2:src (subquery in having) --- Stage 6: Stage 2 o/p semijoin Stage 5 -explain +POSTHOOK: query: explain select key, value, count(*) from src b where b.key in (select key from src where src.key > '8') @@ -1093,28 +1065,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- Plan is: --- Stage 5: group by on sq2:src (subquery in having) --- Stage 10: hashtable for sq1:src (subquery in where) --- Stage 2: b map-side semijoin Stage 10 o/p --- Stage 3: Stage 2 semijoin Stage 5 --- Stage 9: construct hastable for Stage 5 o/p --- Stage 6: Stage 2 map-side semijoin Stage 9 -explain +PREHOOK: query: explain select key, value, count(*) from src b where b.key in (select key from src where src.key > '8') group by key, 
value having count(*) in (select count(*) from src s1 where s1.key > '9' group by s1.key ) PREHOOK: type: QUERY -POSTHOOK: query: -- Plan is: --- Stage 5: group by on sq2:src (subquery in having) --- Stage 10: hashtable for sq1:src (subquery in where) --- Stage 2: b map-side semijoin Stage 10 o/p --- Stage 3: Stage 2 semijoin Stage 5 --- Stage 9: construct hastable for Stage 5 o/p --- Stage 6: Stage 2 map-side semijoin Stage 9 -explain +POSTHOOK: query: explain select key, value, count(*) from src b where b.key in (select key from src where src.key > '8') @@ -1347,16 +1305,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- non agg, non corr, windowing -explain +PREHOOK: query: explain select p_mfgr, p_name, avg(p_size) from part_subq group by p_mfgr, p_name having p_name in (select first_value(p_name) over(partition by p_mfgr order by p_size) from part_subq) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr, windowing -explain +POSTHOOK: query: explain select p_mfgr, p_name, avg(p_size) from part_subq group by p_mfgr, p_name diff --git a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out index fd35547..b8923d8 100644 --- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out +++ b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out @@ -1,15 +1,11 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_4( +PREHOOK: query: CREATE TABLE src_4( key STRING, value STRING ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src_4 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_4( +POSTHOOK: query: CREATE TABLE src_4( key STRING, value STRING ) diff --git a/ql/src/test/results/clientpositive/subquery_notexists.q.out b/ql/src/test/results/clientpositive/subquery_notexists.q.out index 6ec3b46..89527e9 100644 --- a/ql/src/test/results/clientpositive/subquery_notexists.q.out +++ 
b/ql/src/test/results/clientpositive/subquery_notexists.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- no agg, corr -explain +PREHOOK: query: explain select * from src b where not exists @@ -8,8 +7,7 @@ where not exists where b.value = a.value and a.key = b.key and a.value > 'val_2' ) PREHOOK: type: QUERY -POSTHOOK: query: -- no agg, corr -explain +POSTHOOK: query: explain select * from src b where not exists @@ -321,8 +319,7 @@ POSTHOOK: Input: default@src 199 val_199 199 val_199 2 val_2 -PREHOOK: query: -- distinct, corr -explain +PREHOOK: query: explain select * from src b where not exists @@ -331,8 +328,7 @@ where not exists where b.value = a.value and a.value > 'val_2' ) PREHOOK: type: QUERY -POSTHOOK: query: -- distinct, corr -explain +POSTHOOK: query: explain select * from src b where not exists diff --git a/ql/src/test/results/clientpositive/subquery_notexists_having.q.out b/ql/src/test/results/clientpositive/subquery_notexists_having.q.out index 5948f9a..0ca743d 100644 --- a/ql/src/test/results/clientpositive/subquery_notexists_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_notexists_having.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- no agg, corr -explain +PREHOOK: query: explain select * from src b group by key, value @@ -9,8 +8,7 @@ having not exists where b.value = a.value and a.key = b.key and a.value > 'val_12' ) PREHOOK: type: QUERY -POSTHOOK: query: -- no agg, corr -explain +POSTHOOK: query: explain select * from src b group by key, value @@ -253,8 +251,7 @@ POSTHOOK: Input: default@src 118 val_118 119 val_119 12 val_12 -PREHOOK: query: -- distinct, corr -explain +PREHOOK: query: explain select * from src b group by key, value @@ -264,8 +261,7 @@ having not exists where b.value = a.value and a.value > 'val_12' ) PREHOOK: type: QUERY -POSTHOOK: query: -- distinct, corr -explain +POSTHOOK: query: explain select * from src b group by key, value diff --git a/ql/src/test/results/clientpositive/subquery_notin_having.q.out 
b/ql/src/test/results/clientpositive/subquery_notin_having.q.out index 9f72cc9..abdd4c7 100644 --- a/ql/src/test/results/clientpositive/subquery_notin_having.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin_having.q.out @@ -1,7 +1,5 @@ Warning: Shuffle Join JOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- non agg, non corr - -explain +PREHOOK: query: explain select key, count(*) from src group by key @@ -10,9 +8,7 @@ having key not in where s1.key > '12' ) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, non corr - -explain +POSTHOOK: query: explain select key, count(*) from src group by key @@ -209,8 +205,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- non agg, corr -explain +PREHOOK: query: explain select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr @@ -220,8 +215,7 @@ having b.p_mfgr not in where min(p_retailprice) = l and r - l > 600 ) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr -explain +POSTHOOK: query: explain select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr @@ -810,8 +804,7 @@ POSTHOOK: Input: default@part Manufacturer#1 1173.15 Manufacturer#2 1690.68 Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-2:MAPRED' is a cross product -PREHOOK: query: -- agg, non corr -explain +PREHOOK: query: explain select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr @@ -822,8 +815,7 @@ having b.p_mfgr not in having max(p_retailprice) - min(p_retailprice) > 600 ) PREHOOK: type: QUERY -POSTHOOK: query: -- agg, non corr -explain +POSTHOOK: query: explain select b.p_mfgr, min(p_retailprice) from part b group by b.p_mfgr @@ -1100,13 +1092,11 @@ POSTHOOK: Input: default@part #### A masked pattern was here #### Manufacturer#1 1173.15 Manufacturer#2 1690.68 -PREHOOK: query: --nullability tests -CREATE TABLE t1 (c1 INT, c2 CHAR(100)) +PREHOOK: query: CREATE TABLE t1 (c1 INT, c2 CHAR(100)) PREHOOK: type: CREATETABLE 
PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: --nullability tests -CREATE TABLE t1 (c1 INT, c2 CHAR(100)) +POSTHOOK: query: CREATE TABLE t1 (c1 INT, c2 CHAR(100)) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out index 7385b4c..1eb1b93 100644 --- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out +++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out @@ -34,11 +34,9 @@ POSTHOOK: query: create table part2( POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@part2 -PREHOOK: query: -- non agg, corr -explain select * from src11 where src11.key1 in (select key from src where src11.value1 = value and key > '9') +PREHOOK: query: explain select * from src11 where src11.key1 in (select key from src where src11.value1 = value and key > '9') PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr -explain select * from src11 where src11.key1 in (select key from src where src11.value1 = value and key > '9') +POSTHOOK: query: explain select * from src11 where src11.key1 in (select key from src where src11.value1 = value and key > '9') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-4 is a root stage @@ -350,8 +348,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- agg, corr -explain +PREHOOK: query: explain select p_mfgr, p_name, p_size from part b where b.p_size in (select min(p2_size) @@ -359,8 +356,7 @@ from part b where b.p_size in where r <= 2 and b.p_mfgr = p2_mfgr ) PREHOOK: type: QUERY -POSTHOOK: query: -- agg, corr -explain +POSTHOOK: query: explain select p_mfgr, p_name, p_size from part b where b.p_size in (select min(p2_size) @@ -866,8 +862,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- distinct, corr -explain +PREHOOK: query: explain select * 
from src b where b.key in @@ -876,8 +871,7 @@ where b.key in where b.value = value and key > '9' ) PREHOOK: type: QUERY -POSTHOOK: query: -- distinct, corr -explain +POSTHOOK: query: explain select * from src b where b.key in @@ -1044,15 +1038,13 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- non agg, corr, having -explain +PREHOOK: query: explain select key, value, count(*) from src b group by key, value having count(*) in (select count(*) from src where src.key > '9' and src.value = b.value group by key ) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr, having -explain +POSTHOOK: query: explain select key, value, count(*) from src b group by key, value @@ -1299,8 +1291,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- non agg, corr -explain +PREHOOK: query: explain select p_mfgr, b.p_name, p_size from part b where b.p_name not in @@ -1309,8 +1300,7 @@ where b.p_name not in where r <= 2 and b.p_mfgr = p_mfgr ) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr -explain +POSTHOOK: query: explain select p_mfgr, b.p_name, p_size from part b where b.p_name not in diff --git a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out index d60ba82..c46ba12 100644 --- a/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out +++ b/ql/src/test/results/clientpositive/temp_table_display_colstats_tbllvl.q.out @@ -1,11 +1,8 @@ -PREHOOK: query: -- Based on display_colstats_tbllvl.q, output should be almost exactly the same. -DROP TABLE IF EXISTS UserVisits_web_text_none +PREHOOK: query: DROP TABLE IF EXISTS UserVisits_web_text_none PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Based on display_colstats_tbllvl.q, output should be almost exactly the same. 
-DROP TABLE IF EXISTS UserVisits_web_text_none +POSTHOOK: query: DROP TABLE IF EXISTS UserVisits_web_text_none POSTHOOK: type: DROPTABLE -PREHOOK: query: -- Hack, set external location because generated filename changes during test runs -CREATE TEMPORARY EXTERNAL TABLE UserVisits_web_text_none ( +PREHOOK: query: CREATE TEMPORARY EXTERNAL TABLE UserVisits_web_text_none ( sourceIP string, destURL string, visitDate string, @@ -21,8 +18,7 @@ PREHOOK: type: CREATETABLE #### A masked pattern was here #### PREHOOK: Output: database:default PREHOOK: Output: default@UserVisits_web_text_none -POSTHOOK: query: -- Hack, set external location because generated filename changes during test runs -CREATE TEMPORARY EXTERNAL TABLE UserVisits_web_text_none ( +POSTHOOK: query: CREATE TEMPORARY EXTERNAL TABLE UserVisits_web_text_none ( sourceIP string, destURL string, visitDate string, diff --git a/ql/src/test/results/clientpositive/temp_table_gb1.q.out b/ql/src/test/results/clientpositive/temp_table_gb1.q.out index 573acce..1cef596 100644 --- a/ql/src/test/results/clientpositive/temp_table_gb1.q.out +++ b/ql/src/test/results/clientpositive/temp_table_gb1.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Taken from groupby2.q -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest_g2 -POSTHOOK: query: -- Taken from groupby2.q -CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest_g2(key STRING, c1 INT, c2 STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest_g2 diff --git a/ql/src/test/results/clientpositive/temp_table_join1.q.out b/ql/src/test/results/clientpositive/temp_table_join1.q.out index 9b60558..f30bc83 100644 --- a/ql/src/test/results/clientpositive/temp_table_join1.q.out 
+++ b/ql/src/test/results/clientpositive/temp_table_join1.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 +PREHOOK: query: CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src_nontemp -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 +POSTHOOK: query: CREATE TABLE src_nontemp AS SELECT * FROM src limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -24,13 +20,11 @@ POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@src_temp -PREHOOK: query: -- Non temp table join -EXPLAIN +PREHOOK: query: EXPLAIN FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- Non temp table join -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src_nontemp src1 JOIN src_nontemp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value POSTHOOK: type: QUERY @@ -120,13 +114,11 @@ POSTHOOK: Input: default@src_nontemp 484 val_484 86 val_86 98 val_98 -PREHOOK: query: -- Non temp table join with temp table -EXPLAIN +PREHOOK: query: EXPLAIN FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value PREHOOK: type: QUERY -POSTHOOK: query: -- Non temp table join with temp table -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src_nontemp src1 JOIN src_temp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value POSTHOOK: type: QUERY @@ -218,13 +210,11 @@ POSTHOOK: Input: default@src_temp 484 val_484 86 val_86 98 val_98 -PREHOOK: query: -- temp table join with temp table -EXPLAIN +PREHOOK: query: EXPLAIN FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value PREHOOK: type: QUERY 
-POSTHOOK: query: -- temp table join with temp table -EXPLAIN +POSTHOOK: query: EXPLAIN FROM src_temp src1 JOIN src_temp src2 ON (src1.key = src2.key) SELECT src1.key, src2.value POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/temp_table_names.q.out b/ql/src/test/results/clientpositive/temp_table_names.q.out index e2b368b..f8ad01a 100644 --- a/ql/src/test/results/clientpositive/temp_table_names.q.out +++ b/ql/src/test/results/clientpositive/temp_table_names.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Test temp tables with upper/lower case names -create temporary table Default.Temp_Table_Names (C1 string, c2 string) +PREHOOK: query: create temporary table Default.Temp_Table_Names (C1 string, c2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: Default@Temp_Table_Names PREHOOK: Output: database:default -POSTHOOK: query: -- Test temp tables with upper/lower case names -create temporary table Default.Temp_Table_Names (C1 string, c2 string) +POSTHOOK: query: create temporary table Default.Temp_Table_Names (C1 string, c2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: Default@Temp_Table_Names POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/temp_table_options1.q.out b/ql/src/test/results/clientpositive/temp_table_options1.q.out index 01cc978..be31a5a 100644 --- a/ql/src/test/results/clientpositive/temp_table_options1.q.out +++ b/ql/src/test/results/clientpositive/temp_table_options1.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Delimiter test, taken from delimiter.q -create temporary table impressions (imp string, msg string) +PREHOOK: query: create temporary table impressions (imp string, msg string) row format delimited fields terminated by '\t' lines terminated by '\n' @@ -7,8 +6,7 @@ stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@impressions -POSTHOOK: query: -- Delimiter test, taken from delimiter.q -create temporary table impressions (imp string, msg 
string) +POSTHOOK: query: create temporary table impressions (imp string, msg string) row format delimited fields terminated by '\t' lines terminated by '\n' @@ -54,12 +52,7 @@ POSTHOOK: query: drop table impressions POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@impressions POSTHOOK: Output: default@impressions -PREHOOK: query: -- Try different SerDe formats, taken from date_serde.q - --- --- RegexSerDe --- -create temporary table date_serde_regex ( +PREHOOK: query: create temporary table date_serde_regex ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, FL_DATE date, @@ -74,12 +67,7 @@ stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_regex -POSTHOOK: query: -- Try different SerDe formats, taken from date_serde.q - --- --- RegexSerDe --- -create temporary table date_serde_regex ( +POSTHOOK: query: create temporary table date_serde_regex ( ORIGIN_CITY_NAME string, DEST_CITY_NAME string, FL_DATE date, @@ -267,20 +255,14 @@ POSTHOOK: Input: default@date_serde_regex 2010-10-29 12 2010-10-30 11 2010-10-31 8 -PREHOOK: query: -- --- LazyBinary --- -create temporary table date_serde_lb ( +PREHOOK: query: create temporary table date_serde_lb ( c1 date, c2 int ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_lb -POSTHOOK: query: -- --- LazyBinary --- -create temporary table date_serde_lb ( +POSTHOOK: query: create temporary table date_serde_lb ( c1 date, c2 int ) @@ -325,20 +307,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_serde_lb #### A masked pattern was here #### 2010-10-20 1064 -PREHOOK: query: -- --- LazySimple --- -create temporary table date_serde_ls ( +PREHOOK: query: create temporary table date_serde_ls ( c1 date, c2 int ) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_ls -POSTHOOK: query: -- --- LazySimple --- -create temporary table date_serde_ls ( +POSTHOOK: query: create 
temporary table date_serde_ls ( c1 date, c2 int ) @@ -383,20 +359,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_serde_ls #### A masked pattern was here #### 2010-10-20 1064 -PREHOOK: query: -- --- Columnar --- -create temporary table date_serde_c ( +PREHOOK: query: create temporary table date_serde_c ( c1 date, c2 int ) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_c -POSTHOOK: query: -- --- Columnar --- -create temporary table date_serde_c ( +POSTHOOK: query: create temporary table date_serde_c ( c1 date, c2 int ) stored as rcfile @@ -441,20 +411,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_serde_c #### A masked pattern was here #### 2010-10-20 1064 -PREHOOK: query: -- --- LazyBinaryColumnar --- -create temporary table date_serde_lbc ( +PREHOOK: query: create temporary table date_serde_lbc ( c1 date, c2 int ) stored as rcfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_lbc -POSTHOOK: query: -- --- LazyBinaryColumnar --- -create temporary table date_serde_lbc ( +POSTHOOK: query: create temporary table date_serde_lbc ( c1 date, c2 int ) stored as rcfile @@ -499,20 +463,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@date_serde_lbc #### A masked pattern was here #### 2010-10-20 1064 -PREHOOK: query: -- --- ORC --- -create temporary table date_serde_orc ( +PREHOOK: query: create temporary table date_serde_orc ( c1 date, c2 int ) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@date_serde_orc -POSTHOOK: query: -- --- ORC --- -create temporary table date_serde_orc ( +POSTHOOK: query: create temporary table date_serde_orc ( c1 date, c2 int ) stored as orc diff --git a/ql/src/test/results/clientpositive/temp_table_precedence.q.out b/ql/src/test/results/clientpositive/temp_table_precedence.q.out index 2b2e0aa..d75c3cd 100644 --- 
a/ql/src/test/results/clientpositive/temp_table_precedence.q.out +++ b/ql/src/test/results/clientpositive/temp_table_precedence.q.out @@ -4,13 +4,11 @@ PREHOOK: Output: database:ttp POSTHOOK: query: create database ttp POSTHOOK: type: CREATEDATABASE POSTHOOK: Output: database:ttp -PREHOOK: query: -- Create non-temp tables -create table ttp.tab1 (a1 string, a2 string) +PREHOOK: query: create table ttp.tab1 (a1 string, a2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:ttp PREHOOK: Output: ttp@tab1 -POSTHOOK: query: -- Create non-temp tables -create table ttp.tab1 (a1 string, a2 string) +POSTHOOK: query: create table ttp.tab1 (a1 string, a2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:ttp POSTHOOK: Output: ttp@tab1 @@ -78,13 +76,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: ttp@tab2 #### A masked pattern was here #### 2 val_2 -PREHOOK: query: -- Now create temp table with same name -create temporary table ttp.tab1 (c1 int, c2 string) +PREHOOK: query: create temporary table ttp.tab1 (c1 int, c2 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:ttp PREHOOK: Output: ttp@tab1 -POSTHOOK: query: -- Now create temp table with same name -create temporary table ttp.tab1 (c1 int, c2 string) +POSTHOOK: query: create temporary table ttp.tab1 (c1 int, c2 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:ttp POSTHOOK: Output: ttp@tab1 @@ -98,12 +94,10 @@ POSTHOOK: Input: default@src POSTHOOK: Output: ttp@tab1 POSTHOOK: Lineage: tab1.c1 EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: tab1.c2 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- describe/select should now use temp table -describe ttp.tab1 +PREHOOK: query: describe ttp.tab1 PREHOOK: type: DESCTABLE PREHOOK: Input: ttp@tab1 -POSTHOOK: query: -- describe/select should now use temp table -describe ttp.tab1 +POSTHOOK: query: describe ttp.tab1 POSTHOOK: type: DESCTABLE POSTHOOK: 
Input: ttp@tab1 c1 int @@ -119,12 +113,10 @@ POSTHOOK: Input: ttp@tab1 0 val_0 0 val_0 0 val_0 -PREHOOK: query: -- rename the temp table, and now we can see our non-temp table again -use ttp +PREHOOK: query: use ttp PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:ttp -POSTHOOK: query: -- rename the temp table, and now we can see our non-temp table again -use ttp +POSTHOOK: query: use ttp POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:ttp PREHOOK: query: alter table tab1 rename to tab2 @@ -161,12 +153,10 @@ POSTHOOK: Input: ttp@tab1 5 val_5 5 val_5 5 val_5 -PREHOOK: query: -- now the non-temp tab2 should be hidden -describe ttp.tab2 +PREHOOK: query: describe ttp.tab2 PREHOOK: type: DESCTABLE PREHOOK: Input: ttp@tab2 -POSTHOOK: query: -- now the non-temp tab2 should be hidden -describe ttp.tab2 +POSTHOOK: query: describe ttp.tab2 POSTHOOK: type: DESCTABLE POSTHOOK: Input: ttp@tab2 c1 int @@ -182,13 +172,11 @@ POSTHOOK: Input: ttp@tab2 0 val_0 0 val_0 0 val_0 -PREHOOK: query: -- drop the temp table, and now we should be able to see the non-temp tab2 again -drop table ttp.tab2 +PREHOOK: query: drop table ttp.tab2 PREHOOK: type: DROPTABLE PREHOOK: Input: ttp@tab2 PREHOOK: Output: ttp@tab2 -POSTHOOK: query: -- drop the temp table, and now we should be able to see the non-temp tab2 again -drop table ttp.tab2 +POSTHOOK: query: drop table ttp.tab2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: ttp@tab2 POSTHOOK: Output: ttp@tab2 diff --git a/ql/src/test/results/clientpositive/temp_table_subquery1.q.out b/ql/src/test/results/clientpositive/temp_table_subquery1.q.out index 8a9c537..aae1dcb 100644 --- a/ql/src/test/results/clientpositive/temp_table_subquery1.q.out +++ b/ql/src/test/results/clientpositive/temp_table_subquery1.q.out @@ -8,8 +8,7 @@ POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default POSTHOOK: Output: default@src_temp -PREHOOK: query: -- subquery exists -select * +PREHOOK: query: select * from 
src_temp b where exists (select a.key @@ -19,8 +18,7 @@ where exists PREHOOK: type: QUERY PREHOOK: Input: default@src_temp #### A masked pattern was here #### -POSTHOOK: query: -- subquery exists -select * +POSTHOOK: query: select * from src_temp b where exists (select a.key @@ -41,15 +39,13 @@ POSTHOOK: Input: default@src_temp 97 val_97 98 val_98 98 val_98 -PREHOOK: query: -- subquery in -select * +PREHOOK: query: select * from src_temp where src_temp.key in (select key from src_temp s1 where s1.key > '9') PREHOOK: type: QUERY PREHOOK: Input: default@src_temp #### A masked pattern was here #### -POSTHOOK: query: -- subquery in -select * +POSTHOOK: query: select * from src_temp where src_temp.key in (select key from src_temp s1 where s1.key > '9') POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out b/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out index 61a74f0..28f229e 100644 --- a/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out +++ b/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out @@ -628,13 +628,11 @@ Manufacturer#5 1241.29 Manufacturer#5 1424.0900000000001 Manufacturer#5 1515.25 Manufacturer#5 1534.532 -PREHOOK: query: -- multi table insert test -create table t1 (a1 int, b1 string) +PREHOOK: query: create table t1 (a1 int, b1 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- multi table insert test -create table t1 (a1 int, b1 string) +POSTHOOK: query: create table t1 (a1 int, b1 string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 diff --git a/ql/src/test/results/clientpositive/tez_join_hash.q.out b/ql/src/test/results/clientpositive/tez_join_hash.q.out index 6b366af..c5e4757 100644 --- a/ql/src/test/results/clientpositive/tez_join_hash.q.out +++ b/ql/src/test/results/clientpositive/tez_join_hash.q.out @@ -1,12 
+1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_src (key string, value string) STORED AS ORC +PREHOOK: query: create table orc_src (key string, value string) STORED AS ORC PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@orc_src -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table orc_src (key string, value string) STORED AS ORC +POSTHOOK: query: create table orc_src (key string, value string) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@orc_src diff --git a/ql/src/test/results/clientpositive/timestamp.q.out b/ql/src/test/results/clientpositive/timestamp.q.out index 7c08ec8..9d0ceef 100644 --- a/ql/src/test/results/clientpositive/timestamp.q.out +++ b/ql/src/test/results/clientpositive/timestamp.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5 +PREHOOK: query: explain select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5 +POSTHOOK: query: explain select cast('2011-01-01 01:01:01' as timestamp) as c from src union select cast('2011-01-01 01:01:01' as timestamp) as c from src limit 5 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-1 is a root stage diff --git a/ql/src/test/results/clientpositive/timestamp_comparison2.q.out b/ql/src/test/results/clientpositive/timestamp_comparison2.q.out index 76ac21d..8ef2552 100644 --- a/ql/src/test/results/clientpositive/timestamp_comparison2.q.out +++ b/ql/src/test/results/clientpositive/timestamp_comparison2.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Test 
timestamp-to-numeric comparison -select count(*) +PREHOOK: query: select count(*) FROM alltypesorc WHERE ((ctinyint != 0) @@ -11,8 +10,7 @@ WHERE PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- Test timestamp-to-numeric comparison -select count(*) +POSTHOOK: query: select count(*) FROM alltypesorc WHERE ((ctinyint != 0) @@ -25,8 +23,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### 1826 -PREHOOK: query: -- Should have same result as previous query -select count(*) +PREHOOK: query: select count(*) FROM alltypesorc WHERE ((ctinyint != 0) @@ -38,8 +35,7 @@ WHERE PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- Should have same result as previous query -select count(*) +POSTHOOK: query: select count(*) FROM alltypesorc WHERE ((ctinyint != 0) diff --git a/ql/src/test/results/clientpositive/timestamp_formats.q.out b/ql/src/test/results/clientpositive/timestamp_formats.q.out index 9cc9b29..98afa30 100644 --- a/ql/src/test/results/clientpositive/timestamp_formats.q.out +++ b/ql/src/test/results/clientpositive/timestamp_formats.q.out @@ -57,13 +57,11 @@ POSTHOOK: Input: default@timestamp_formats 2029-07-07 19:19:19.191919191 2029-07-07 19:19:19.191919191 2029-07-07T19:19:19.191919191 NULL 2029-07-07T19:19:19 NULL 2030-08-08 20:20:20.202020202 2030-08-08 20:20:20.202020202 2030-08-08T20:20:20.202020202 NULL 2030-08-08T20:20:20 NULL 2031-09-09 21:21:21.212121212 2031-09-09 21:21:21.212121212 2031-09-09T21:21:21.212121212 NULL 2031-09-09T21:21:21 NULL -PREHOOK: query: -- Add single timestamp format. 
This should allow c3_ts to parse -ALTER TABLE timestamp_formats SET SERDEPROPERTIES ("timestamp.formats"="yyyy-MM-dd'T'HH:mm:ss") +PREHOOK: query: ALTER TABLE timestamp_formats SET SERDEPROPERTIES ("timestamp.formats"="yyyy-MM-dd'T'HH:mm:ss") PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@timestamp_formats PREHOOK: Output: default@timestamp_formats -POSTHOOK: query: -- Add single timestamp format. This should allow c3_ts to parse -ALTER TABLE timestamp_formats SET SERDEPROPERTIES ("timestamp.formats"="yyyy-MM-dd'T'HH:mm:ss") +POSTHOOK: query: ALTER TABLE timestamp_formats SET SERDEPROPERTIES ("timestamp.formats"="yyyy-MM-dd'T'HH:mm:ss") POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: default@timestamp_formats POSTHOOK: Output: default@timestamp_formats @@ -96,13 +94,11 @@ POSTHOOK: Input: default@timestamp_formats 2029-07-07 19:19:19.191919191 2029-07-07 19:19:19.191919191 2029-07-07T19:19:19.191919191 NULL 2029-07-07T19:19:19 2029-07-07 19:19:19 2030-08-08 20:20:20.202020202 2030-08-08 20:20:20.202020202 2030-08-08T20:20:20.202020202 NULL 2030-08-08T20:20:20 2030-08-08 20:20:20 2031-09-09 21:21:21.212121212 2031-09-09 21:21:21.212121212 2031-09-09T21:21:21.212121212 NULL 2031-09-09T21:21:21 2031-09-09 21:21:21 -PREHOOK: query: -- Add another format, to allow c2_ts to parse -ALTER TABLE timestamp_formats SET SERDEPROPERTIES ("timestamp.formats"="yyyy-MM-dd'T'HH:mm:ss,yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS") +PREHOOK: query: ALTER TABLE timestamp_formats SET SERDEPROPERTIES ("timestamp.formats"="yyyy-MM-dd'T'HH:mm:ss,yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS") PREHOOK: type: ALTERTABLE_SERDEPROPERTIES PREHOOK: Input: default@timestamp_formats PREHOOK: Output: default@timestamp_formats -POSTHOOK: query: -- Add another format, to allow c2_ts to parse -ALTER TABLE timestamp_formats SET SERDEPROPERTIES ("timestamp.formats"="yyyy-MM-dd'T'HH:mm:ss,yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS") +POSTHOOK: query: ALTER TABLE timestamp_formats SET SERDEPROPERTIES 
("timestamp.formats"="yyyy-MM-dd'T'HH:mm:ss,yyyy-MM-dd'T'HH:mm:ss.SSSSSSSSS") POSTHOOK: type: ALTERTABLE_SERDEPROPERTIES POSTHOOK: Input: default@timestamp_formats POSTHOOK: Output: default@timestamp_formats diff --git a/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out index c7f2a74..bc5ceb3 100644 --- a/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out +++ b/ql/src/test/results/clientpositive/timestamp_ints_casts.q.out @@ -1,6 +1,6 @@ PREHOOK: query: explain select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -13,12 +13,12 @@ select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY POSTHOOK: query: explain select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -31,7 +31,7 @@ select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -67,7 +67,7 @@ STAGE PLANS: ListSink PREHOOK: query: select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -80,13 +80,13 @@ PREHOOK: query: select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### POSTHOOK: query: select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -99,7 +99,7 @@ POSTHOOK: query: select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- 
limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc @@ -132,7 +132,7 @@ POSTHOOK: Input: default@alltypesorc 1969-12-31 16:00:00.011 NULL 1969-12-27 18:49:09.583 1970-01-14 22:35:27 1969-12-31 16:00:11 NULL 1969-12-31 16:00:00.001 1969-12-31 16:00:00 1969-12-31 16:00:02.351 NULL NULL PREHOOK: query: explain select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -145,12 +145,12 @@ select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY POSTHOOK: query: explain select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -163,7 +163,7 @@ select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -199,7 +199,7 @@ STAGE PLANS: ListSink PREHOOK: query: select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -212,13 +212,13 @@ PREHOOK: query: select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### POSTHOOK: query: select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -231,7 +231,7 @@ POSTHOOK: query: select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc diff --git 
a/ql/src/test/results/clientpositive/timestamp_udf.q.out b/ql/src/test/results/clientpositive/timestamp_udf.q.out index 904c894..47f84cb 100644 --- a/ql/src/test/results/clientpositive/timestamp_udf.q.out +++ b/ql/src/test/results/clientpositive/timestamp_udf.q.out @@ -42,15 +42,13 @@ POSTHOOK: Output: default@timestamp_udf POSTHOOK: Output: default@timestamp_udf_string POSTHOOK: Lineage: timestamp_udf.t EXPRESSION [] POSTHOOK: Lineage: timestamp_udf_string.t SIMPLE [] -PREHOOK: query: -- Test UDFs with Timestamp input -select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), +PREHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), weekofyear(t), hour(t), minute(t), second(t), to_date(t) from timestamp_udf PREHOOK: type: QUERY PREHOOK: Input: default@timestamp_udf #### A masked pattern was here #### -POSTHOOK: query: -- Test UDFs with Timestamp input -select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), +POSTHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), weekofyear(t), hour(t), minute(t), second(t), to_date(t) from timestamp_udf POSTHOOK: type: QUERY @@ -145,15 +143,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@timestamp_udf #### A masked pattern was here #### 2011-05-06 07:08:09.1234567 2011-05-06 12:08:09.1234567 2011-05-06 07:08:09.1234567 2011-05-06 12:08:09.1234567 -PREHOOK: query: -- Test UDFs with string input -select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), +PREHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), weekofyear(t), hour(t), minute(t), second(t), to_date(t) from timestamp_udf_string PREHOOK: type: QUERY PREHOOK: Input: default@timestamp_udf_string #### A masked pattern was here #### -POSTHOOK: query: -- Test UDFs with string input -select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), +POSTHOOK: query: select unix_timestamp(t), year(t), month(t), day(t), dayofmonth(t), weekofyear(t), 
hour(t), minute(t), second(t), to_date(t) from timestamp_udf_string POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/transform2.q.out b/ql/src/test/results/clientpositive/transform2.q.out index 28d098d..aeeaebf 100644 --- a/ql/src/test/results/clientpositive/transform2.q.out +++ b/ql/src/test/results/clientpositive/transform2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Transform with a function that has many parameters -SELECT TRANSFORM(substr(key, 1, 2)) USING 'cat' FROM src LIMIT 1 +PREHOOK: query: SELECT TRANSFORM(substr(key, 1, 2)) USING 'cat' FROM src LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Transform with a function that has many parameters -SELECT TRANSFORM(substr(key, 1, 2)) USING 'cat' FROM src LIMIT 1 +POSTHOOK: query: SELECT TRANSFORM(substr(key, 1, 2)) USING 'cat' FROM src LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/transform_acid.q.out b/ql/src/test/results/clientpositive/transform_acid.q.out index 29d0638..bfa9aee 100644 --- a/ql/src/test/results/clientpositive/transform_acid.q.out +++ b/ql/src/test/results/clientpositive/transform_acid.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- EXCLUDE_OS_WINDOWS - -create table transform_acid(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: query: create table transform_acid(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@transform_acid -POSTHOOK: query: -- EXCLUDE_OS_WINDOWS - -create table transform_acid(a int, b varchar(128)) clustered by (a) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: query: create table transform_acid(a int, b varchar(128)) clustered by (a) into 2 buckets 
stored as orc TBLPROPERTIES ('transactional'='true') POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@transform_acid diff --git a/ql/src/test/results/clientpositive/transform_ppr1.q.out b/ql/src/test/results/clientpositive/transform_ppr1.q.out index f15646a..4f3cd8d 100644 --- a/ql/src/test/results/clientpositive/transform_ppr1.q.out +++ b/ql/src/test/results/clientpositive/transform_ppr1.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM ( FROM srcpart src SELECT TRANSFORM(src.ds, src.key, src.value) @@ -9,9 +7,7 @@ FROM ( ) tmap SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100 AND tmap.ds = '2008-04-08' PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM ( FROM srcpart src SELECT TRANSFORM(src.ds, src.key, src.value) diff --git a/ql/src/test/results/clientpositive/transform_ppr2.q.out b/ql/src/test/results/clientpositive/transform_ppr2.q.out index db99985..d673318 100644 --- a/ql/src/test/results/clientpositive/transform_ppr2.q.out +++ b/ql/src/test/results/clientpositive/transform_ppr2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED FROM ( FROM srcpart src SELECT TRANSFORM(src.ds, src.key, src.value) @@ -10,9 +8,7 @@ FROM ( ) tmap SELECT tmap.tkey, tmap.tvalue WHERE tmap.tkey < 100 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED FROM ( FROM srcpart src SELECT TRANSFORM(src.ds, src.key, src.value) diff --git a/ql/src/test/results/clientpositive/truncate_column.q.out b/ql/src/test/results/clientpositive/truncate_column.q.out index 2efba75..cc3bc89 100644 --- a/ql/src/test/results/clientpositive/truncate_column.q.out +++ b/ql/src/test/results/clientpositive/truncate_column.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- Tests truncating column(s) from a table, 
also tests that stats are updated - -CREATE TABLE test_tab (key STRING, value STRING) +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating column(s) from a table, also tests that stats are updated - -CREATE TABLE test_tab (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe' STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -75,13 +71,11 @@ POSTHOOK: Input: default@test_tab 484 val_484 86 val_86 98 val_98 -PREHOOK: query: -- Truncate 1 column -TRUNCATE TABLE test_tab COLUMNS (key) +PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key) PREHOOK: type: TRUNCATETABLE PREHOOK: Input: default@test_tab PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Truncate 1 column -TRUNCATE TABLE test_tab COLUMNS (key) +POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab @@ -119,13 +113,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- First column should be null -SELECT * FROM test_tab ORDER BY value +PREHOOK: query: SELECT * FROM test_tab ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- First column should be null -SELECT * FROM test_tab ORDER BY value +POSTHOOK: query: SELECT * FROM test_tab ORDER BY value POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### @@ -139,13 +131,11 @@ NULL val_409 NULL val_484 NULL val_86 NULL val_98 -PREHOOK: query: -- Truncate multiple columns -INSERT OVERWRITE TABLE test_tab SELECT * FROM src 
tablesample (10 rows) +PREHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Truncate multiple columns -INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows) +POSTHOOK: query: INSERT OVERWRITE TABLE test_tab SELECT * FROM src tablesample (10 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src POSTHOOK: Output: default@test_tab @@ -193,13 +183,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Both columns should be null -SELECT * FROM test_tab ORDER BY value +PREHOOK: query: SELECT * FROM test_tab ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- Both columns should be null -SELECT * FROM test_tab ORDER BY value +POSTHOOK: query: SELECT * FROM test_tab ORDER BY value POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### @@ -213,13 +201,11 @@ NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- Truncate columns again -TRUNCATE TABLE test_tab COLUMNS (key, value) +PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value) PREHOOK: type: TRUNCATETABLE PREHOOK: Input: default@test_tab PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Truncate columns again -TRUNCATE TABLE test_tab COLUMNS (key, value) +POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab @@ -257,13 +243,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Both columns should be null -SELECT * FROM test_tab ORDER BY value +PREHOOK: query: SELECT * FROM test_tab ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- Both columns should be 
null -SELECT * FROM test_tab ORDER BY value +POSTHOOK: query: SELECT * FROM test_tab ORDER BY value POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### @@ -277,13 +261,11 @@ NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- Test truncating with a binary serde -ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +PREHOOK: query: ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' PREHOOK: type: ALTERTABLE_SERIALIZER PREHOOK: Input: default@test_tab PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Test truncating with a binary serde -ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' +POSTHOOK: query: ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe' POSTHOOK: type: ALTERTABLE_SERIALIZER POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab @@ -351,13 +333,11 @@ POSTHOOK: Input: default@test_tab 484 val_484 86 val_86 98 val_98 -PREHOOK: query: -- Truncate 1 column -TRUNCATE TABLE test_tab COLUMNS (key) +PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key) PREHOOK: type: TRUNCATETABLE PREHOOK: Input: default@test_tab PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Truncate 1 column -TRUNCATE TABLE test_tab COLUMNS (key) +POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab @@ -396,13 +376,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- First column should be null -SELECT * FROM test_tab ORDER BY value +PREHOOK: query: SELECT * FROM test_tab ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- First column should be null -SELECT * FROM test_tab ORDER BY value +POSTHOOK: query: SELECT * 
FROM test_tab ORDER BY value POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### @@ -416,13 +394,11 @@ NULL val_409 NULL val_484 NULL val_86 NULL val_98 -PREHOOK: query: -- Truncate 2 columns -TRUNCATE TABLE test_tab COLUMNS (key, value) +PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value) PREHOOK: type: TRUNCATETABLE PREHOOK: Input: default@test_tab PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Truncate 2 columns -TRUNCATE TABLE test_tab COLUMNS (key, value) +POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (key, value) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab @@ -461,13 +437,11 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- Both columns should be null -SELECT * FROM test_tab ORDER BY value +PREHOOK: query: SELECT * FROM test_tab ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- Both columns should be null -SELECT * FROM test_tab ORDER BY value +POSTHOOK: query: SELECT * FROM test_tab ORDER BY value POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### @@ -481,13 +455,11 @@ NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- Test truncating a partition -CREATE TABLE test_tab_part (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab_part (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab_part -POSTHOOK: query: -- Test truncating a partition -CREATE TABLE test_tab_part (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab_part (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: 
database:default POSTHOOK: Output: default@test_tab_part @@ -606,14 +578,12 @@ Bucket Columns: [] Sort Columns: [] Storage Desc Params: serialization.format 1 -PREHOOK: query: -- First column should be null -SELECT * FROM test_tab_part WHERE part = '1' ORDER BY value +PREHOOK: query: SELECT * FROM test_tab_part WHERE part = '1' ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@test_tab_part PREHOOK: Input: default@test_tab_part@part=1 #### A masked pattern was here #### -POSTHOOK: query: -- First column should be null -SELECT * FROM test_tab_part WHERE part = '1' ORDER BY value +POSTHOOK: query: SELECT * FROM test_tab_part WHERE part = '1' ORDER BY value POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab_part POSTHOOK: Input: default@test_tab_part@part=1 diff --git a/ql/src/test/results/clientpositive/truncate_column_buckets.q.out b/ql/src/test/results/clientpositive/truncate_column_buckets.q.out index 7147c22..cab0b83 100644 --- a/ql/src/test/results/clientpositive/truncate_column_buckets.q.out +++ b/ql/src/test/results/clientpositive/truncate_column_buckets.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating columns from a bucketed table, table should remain bucketed - -CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating columns from a bucketed table, table should remain bucketed - -CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab @@ -20,16 +16,14 @@ POSTHOOK: Input: default@src 
POSTHOOK: Output: default@test_tab POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Check how many rows there are in each bucket, there should be two rows -SELECT cnt FROM ( +PREHOOK: query: SELECT cnt FROM ( SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM test_tab GROUP BY INPUT__FILE__NAME ORDER BY file_name DESC)a PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- Check how many rows there are in each bucket, there should be two rows -SELECT cnt FROM ( +POSTHOOK: query: SELECT cnt FROM ( SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM test_tab GROUP BY INPUT__FILE__NAME ORDER BY file_name DESC)a @@ -38,28 +32,22 @@ POSTHOOK: Input: default@test_tab #### A masked pattern was here #### 258 242 -PREHOOK: query: -- Truncate a column on which the table is not bucketed -TRUNCATE TABLE test_tab COLUMNS (value) +PREHOOK: query: TRUNCATE TABLE test_tab COLUMNS (value) PREHOOK: type: TRUNCATETABLE PREHOOK: Input: default@test_tab PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Truncate a column on which the table is not bucketed -TRUNCATE TABLE test_tab COLUMNS (value) +POSTHOOK: query: TRUNCATE TABLE test_tab COLUMNS (value) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab -PREHOOK: query: -- Check how many rows there are in each bucket, this should produce the same rows as before --- because truncate should not break bucketing -SELECT cnt FROM ( +PREHOOK: query: SELECT cnt FROM ( SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM test_tab GROUP BY INPUT__FILE__NAME ORDER BY file_name DESC)a PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- Check how many rows there are in each bucket, this should 
produce the same rows as before --- because truncate should not break bucketing -SELECT cnt FROM ( +POSTHOOK: query: SELECT cnt FROM ( SELECT INPUT__FILE__NAME file_name, count(*) cnt FROM test_tab GROUP BY INPUT__FILE__NAME ORDER BY file_name DESC)a diff --git a/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out b/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out index 05ca155..9320f5f 100644 --- a/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out +++ b/ql/src/test/results/clientpositive/truncate_column_list_bucket.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- Tests truncating a column from a list bucketing table - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - -CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a column from a list bucketing table - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) - -CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) PARTITIONED BY (part STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab @@ -57,15 +49,9 @@ POSTHOOK: query: TRUNCATE TABLE test_tab PARTITION (part ='1') COLUMNS (value) POSTHOOK: type: TRUNCATETABLE POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab@part=1 -PREHOOK: query: -- In the following select statements the list bucketing optimization should still be used --- In both cases value should be null - -EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '484' +PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '484' PREHOOK: type: QUERY -POSTHOOK: 
query: -- In the following select statements the list bucketing optimization should still be used --- In both cases value should be null - -EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '484' +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM test_tab WHERE part = '1' AND key = '484' POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/truncate_column_merge.q.out b/ql/src/test/results/clientpositive/truncate_column_merge.q.out index 2693b90..8ff740d 100644 --- a/ql/src/test/results/clientpositive/truncate_column_merge.q.out +++ b/ql/src/test/results/clientpositive/truncate_column_merge.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Tests truncating a column from a table with multiple files, then merging those files - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_tab -POSTHOOK: query: -- Tests truncating a column from a table with multiple files, then merging those files - -CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE test_tab (key STRING, value STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_tab @@ -30,13 +26,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@test_tab POSTHOOK: Lineage: test_tab.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: test_tab.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- The value should be 2 indicating the table has 2 files -SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab +PREHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### 
-POSTHOOK: query: -- The value should be 2 indicating the table has 2 files -SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab +POSTHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### @@ -57,13 +51,11 @@ POSTHOOK: query: ALTER TABLE test_tab CONCATENATE POSTHOOK: type: ALTER_TABLE_MERGE POSTHOOK: Input: default@test_tab POSTHOOK: Output: default@test_tab -PREHOOK: query: -- The first column (key) should be null for all 10 rows -SELECT * FROM test_tab ORDER BY value +PREHOOK: query: SELECT * FROM test_tab ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- The first column (key) should be null for all 10 rows -SELECT * FROM test_tab ORDER BY value +POSTHOOK: query: SELECT * FROM test_tab ORDER BY value POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### @@ -77,13 +69,11 @@ NULL val_311 NULL val_311 NULL val_86 NULL val_86 -PREHOOK: query: -- The value should be 1 indicating the table has 1 file -SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab +PREHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab PREHOOK: type: QUERY PREHOOK: Input: default@test_tab #### A masked pattern was here #### -POSTHOOK: query: -- The value should be 1 indicating the table has 1 file -SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab +POSTHOOK: query: SELECT COUNT(DISTINCT INPUT__FILE__NAME) FROM test_tab POSTHOOK: type: QUERY POSTHOOK: Input: default@test_tab #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/truncate_table.q.out b/ql/src/test/results/clientpositive/truncate_table.q.out index aaf89ad..a3399e0 100644 --- a/ql/src/test/results/clientpositive/truncate_table.q.out +++ b/ql/src/test/results/clientpositive/truncate_table.q.out @@ -114,11 +114,9 @@ POSTHOOK: Output: 
default@srcpart_truncate@ds=2008-04-08/hr=11 POSTHOOK: Output: default@srcpart_truncate@ds=2008-04-08/hr=12 POSTHOOK: Output: default@srcpart_truncate@ds=2008-04-09/hr=11 POSTHOOK: Output: default@srcpart_truncate@ds=2008-04-09/hr=12 -PREHOOK: query: -- truncate non-partitioned table -explain TRUNCATE TABLE src_truncate +PREHOOK: query: explain TRUNCATE TABLE src_truncate PREHOOK: type: TRUNCATETABLE -POSTHOOK: query: -- truncate non-partitioned table -explain TRUNCATE TABLE src_truncate +POSTHOOK: query: explain TRUNCATE TABLE src_truncate POSTHOOK: type: TRUNCATETABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -152,11 +150,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src_truncate #### A masked pattern was here #### 0 -PREHOOK: query: -- truncate a partition -explain TRUNCATE TABLE srcpart_truncate partition (ds='2008-04-08', hr='11') +PREHOOK: query: explain TRUNCATE TABLE srcpart_truncate partition (ds='2008-04-08', hr='11') PREHOOK: type: TRUNCATETABLE -POSTHOOK: query: -- truncate a partition -explain TRUNCATE TABLE srcpart_truncate partition (ds='2008-04-08', hr='11') +POSTHOOK: query: explain TRUNCATE TABLE srcpart_truncate partition (ds='2008-04-08', hr='11') POSTHOOK: type: TRUNCATETABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -195,11 +191,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart_truncate #### A masked pattern was here #### 0 -PREHOOK: query: -- truncate partitions with partial spec -explain TRUNCATE TABLE srcpart_truncate partition (ds, hr='12') +PREHOOK: query: explain TRUNCATE TABLE srcpart_truncate partition (ds, hr='12') PREHOOK: type: TRUNCATETABLE -POSTHOOK: query: -- truncate partitions with partial spec -explain TRUNCATE TABLE srcpart_truncate partition (ds, hr='12') +POSTHOOK: query: explain TRUNCATE TABLE srcpart_truncate partition (ds, hr='12') POSTHOOK: type: TRUNCATETABLE STAGE DEPENDENCIES: Stage-0 is a root stage @@ -242,11 +236,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@srcpart_truncate #### A 
masked pattern was here #### 0 -PREHOOK: query: -- truncate partitioned table -explain TRUNCATE TABLE srcpart_truncate +PREHOOK: query: explain TRUNCATE TABLE srcpart_truncate PREHOOK: type: TRUNCATETABLE -POSTHOOK: query: -- truncate partitioned table -explain TRUNCATE TABLE srcpart_truncate +POSTHOOK: query: explain TRUNCATE TABLE srcpart_truncate POSTHOOK: type: TRUNCATETABLE STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/type_conversions_1.q.out b/ql/src/test/results/clientpositive/type_conversions_1.q.out index 7628a3d..6ce5d5d 100644 --- a/ql/src/test/results/clientpositive/type_conversions_1.q.out +++ b/ql/src/test/results/clientpositive/type_conversions_1.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- casting from null should yield null -select +PREHOOK: query: select cast(null as tinyint), cast(null as smallint), cast(null as int), @@ -17,8 +16,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- casting from null should yield null -select +POSTHOOK: query: select cast(null as tinyint), cast(null as smallint), cast(null as int), @@ -37,16 +35,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL -PREHOOK: query: -- Invalid conversions, should all be null -select +PREHOOK: query: select cast('abcd' as date), cast('abcd' as timestamp) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Invalid conversions, should all be null -select +POSTHOOK: query: select cast('abcd' as date), cast('abcd' as timestamp) from src limit 1 diff --git a/ql/src/test/results/clientpositive/type_widening.q.out b/ql/src/test/results/clientpositive/type_widening.q.out index cef994b..30e5564 100644 --- a/ql/src/test/results/clientpositive/type_widening.q.out +++ 
b/ql/src/test/results/clientpositive/type_widening.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- Check for int, bigint automatic type widening conversions in UDFs, UNIONS -EXPLAIN SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1 +PREHOOK: query: EXPLAIN SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1 PREHOOK: type: QUERY -POSTHOOK: query: -- Check for int, bigint automatic type widening conversions in UDFs, UNIONS -EXPLAIN SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1 +POSTHOOK: query: EXPLAIN SELECT COALESCE(0, 9223372036854775807) FROM src LIMIT 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out b/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out index 580d98a..66c0c8f 100644 --- a/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out +++ b/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- 0.23 changed input order of data in reducer task, which affects result of percentile_approx - -CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@bucket -POSTHOOK: query: -- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- 0.23 changed input order of data in reducer task, which affects result of percentile_approx - -CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE bucket (key double, value string) CLUSTERED BY (key) SORTED BY (key DESC) INTO 4 BUCKETS STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: 
default@bucket @@ -140,8 +134,7 @@ POSTHOOK: query: create table t12 (result array) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t12 -PREHOOK: query: -- disable map-side aggregation -FROM bucket +PREHOOK: query: FROM bucket insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) @@ -171,8 +164,7 @@ PREHOOK: Output: default@t6 PREHOOK: Output: default@t7 PREHOOK: Output: default@t8 PREHOOK: Output: default@t9 -POSTHOOK: query: -- disable map-side aggregation -FROM bucket +POSTHOOK: query: FROM bucket insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) @@ -322,8 +314,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t12 #### A masked pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: -- enable map-side aggregation -FROM bucket +PREHOOK: query: FROM bucket insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) @@ -353,8 +344,7 @@ PREHOOK: Output: default@t6 PREHOOK: Output: default@t7 PREHOOK: Output: default@t8 PREHOOK: Output: default@t9 -POSTHOOK: query: -- enable map-side aggregation -FROM bucket +POSTHOOK: query: FROM bucket insert overwrite table t1 SELECT percentile_approx(cast(key AS double), 0.5) insert overwrite table t2 SELECT percentile_approx(cast(key AS double), 0.5, 100) insert overwrite table t3 SELECT percentile_approx(cast(key AS double), 0.5, 1000) @@ -504,12 +494,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t12 #### A masked 
pattern was here #### [26.0,255.5,479.0,491.0] -PREHOOK: query: -- NaN -explain +PREHOOK: query: explain select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket PREHOOK: type: QUERY -POSTHOOK: query: -- NaN -explain +POSTHOOK: query: explain select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -565,12 +553,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@bucket #### A masked pattern was here #### true -PREHOOK: query: -- with CBO -explain +PREHOOK: query: explain select percentile_approx(key, 0.5) from bucket PREHOOK: type: QUERY -POSTHOOK: query: -- with CBO -explain +POSTHOOK: query: explain select percentile_approx(key, 0.5) from bucket POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/udaf_sum_list.q.out b/ql/src/test/results/clientpositive/udaf_sum_list.q.out index 3aa0f9f..8afb053 100644 --- a/ql/src/test/results/clientpositive/udaf_sum_list.q.out +++ b/ql/src/test/results/clientpositive/udaf_sum_list.q.out @@ -1,13 +1,7 @@ -PREHOOK: query: -- HIVE-5279 --- GenericUDAFSumList has Converter which does not have default constructor --- After -create temporary function sum_list as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSumList' +PREHOOK: query: create temporary function sum_list as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSumList' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: sum_list -POSTHOOK: query: -- HIVE-5279 --- GenericUDAFSumList has Converter which does not have default constructor --- After -create temporary function sum_list as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSumList' +POSTHOOK: query: create temporary function sum_list as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFSumList' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: sum_list PREHOOK: query: select sum_list(array(key, key)) from src diff --git 
a/ql/src/test/results/clientpositive/udf_aes_decrypt.q.out b/ql/src/test/results/clientpositive/udf_aes_decrypt.q.out index 83ff03d..5512d2f 100644 --- a/ql/src/test/results/clientpositive/udf_aes_decrypt.q.out +++ b/ql/src/test/results/clientpositive/udf_aes_decrypt.q.out @@ -56,8 +56,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### ABC ABC true true NULL NULL -PREHOOK: query: --bad key -select +PREHOOK: query: select aes_decrypt(unbase64("y6Ss+zCYObpCbgfWfyNWTw=="), '12345678901234567'), aes_decrypt(unbase64("y6Ss+zCYObpCbgfWfyNWTw=="), binary('123456789012345')), aes_decrypt(unbase64("y6Ss+zCYObpCbgfWfyNWTw=="), ''), @@ -67,8 +66,7 @@ aes_decrypt(unbase64("y6Ss+zCYObpCbgfWfyNWTw=="), cast(null as binary)) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --bad key -select +POSTHOOK: query: select aes_decrypt(unbase64("y6Ss+zCYObpCbgfWfyNWTw=="), '12345678901234567'), aes_decrypt(unbase64("y6Ss+zCYObpCbgfWfyNWTw=="), binary('123456789012345')), aes_decrypt(unbase64("y6Ss+zCYObpCbgfWfyNWTw=="), ''), diff --git a/ql/src/test/results/clientpositive/udf_aes_encrypt.q.out b/ql/src/test/results/clientpositive/udf_aes_encrypt.q.out index 2e1bf9e..b9bafed 100644 --- a/ql/src/test/results/clientpositive/udf_aes_encrypt.q.out +++ b/ql/src/test/results/clientpositive/udf_aes_encrypt.q.out @@ -56,8 +56,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### y6Ss+zCYObpCbgfWfyNWTw== BQGHoM3lqYcsurCRq3PlUw== y6Ss+zCYObpCbgfWfyNWTw== BQGHoM3lqYcsurCRq3PlUw== NULL NULL -PREHOOK: query: --bad key -select +PREHOOK: query: select aes_encrypt('ABC', '12345678901234567'), aes_encrypt(binary('ABC'), binary('123456789012345')), aes_encrypt('ABC', ''), @@ -67,8 +66,7 @@ aes_encrypt(binary('ABC'), cast(null as binary)) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern 
was here #### -POSTHOOK: query: --bad key -select +POSTHOOK: query: select aes_encrypt('ABC', '12345678901234567'), aes_encrypt(binary('ABC'), binary('123456789012345')), aes_encrypt('ABC', ''), diff --git a/ql/src/test/results/clientpositive/udf_array_contains.q.out b/ql/src/test/results/clientpositive/udf_array_contains.q.out index 9320deb..090bba7 100644 --- a/ql/src/test/results/clientpositive/udf_array_contains.q.out +++ b/ql/src/test/results/clientpositive/udf_array_contains.q.out @@ -13,25 +13,21 @@ Example: true Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFArrayContains Function type:BUILTIN -PREHOOK: query: -- evalutes function for array of primitives -SELECT array_contains(array(1, 2, 3), 1) FROM src tablesample (1 rows) +PREHOOK: query: SELECT array_contains(array(1, 2, 3), 1) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- evalutes function for array of primitives -SELECT array_contains(array(1, 2, 3), 1) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT array_contains(array(1, 2, 3), 1) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true -PREHOOK: query: -- evaluates function for nested arrays -SELECT array_contains(array(array(1,2), array(2,3), array(3,4)), array(1,2)) +PREHOOK: query: SELECT array_contains(array(array(1,2), array(2,3), array(3,4)), array(1,2)) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- evaluates function for nested arrays -SELECT array_contains(array(array(1,2), array(2,3), array(3,4)), array(1,2)) +POSTHOOK: query: SELECT array_contains(array(array(1,2), array(2,3), array(3,4)), array(1,2)) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/udf_bin.q.out 
b/ql/src/test/results/clientpositive/udf_bin.q.out index c34a549..a05e8a2 100644 --- a/ql/src/test/results/clientpositive/udf_bin.q.out +++ b/ql/src/test/results/clientpositive/udf_bin.q.out @@ -31,13 +31,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 1 0 101111101011100001101100101 -PREHOOK: query: -- Negative numbers should be treated as two's complement (64 bit). -SELECT bin(-5) FROM src tablesample (1 rows) +PREHOOK: query: SELECT bin(-5) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Negative numbers should be treated as two's complement (64 bit). -SELECT bin(-5) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT bin(-5) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_case.q.out b/ql/src/test/results/clientpositive/udf_case.q.out index 3933496..37e336d 100644 --- a/ql/src/test/results/clientpositive/udf_case.q.out +++ b/ql/src/test/results/clientpositive/udf_case.q.out @@ -155,25 +155,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 2 5 15 NULL 20 24 -PREHOOK: query: -- verify that short-circuiting is working correctly for CASE --- we should never get to the ELSE branch, which would raise an exception -SELECT CASE 1 WHEN 1 THEN 'yo' +PREHOOK: query: SELECT CASE 1 WHEN 1 THEN 'yo' ELSE reflect('java.lang.String', 'bogus', 1) END FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- verify that short-circuiting is working correctly for CASE --- we should never get to the ELSE branch, which would raise an exception -SELECT CASE 1 WHEN 1 THEN 'yo' +POSTHOOK: query: SELECT CASE 1 WHEN 1 THEN 'yo' ELSE reflect('java.lang.String', 'bogus', 1) END FROM src tablesample (1 rows) POSTHOOK: 
type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### yo -PREHOOK: query: -- Allow compatible types in when/return type -SELECT CASE 1 +PREHOOK: query: SELECT CASE 1 WHEN 1 THEN 123.0BD ELSE 0.0BD END, @@ -191,8 +186,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Allow compatible types in when/return type -SELECT CASE 1 +POSTHOOK: query: SELECT CASE 1 WHEN 1 THEN 123.0BD ELSE 0.0BD END, diff --git a/ql/src/test/results/clientpositive/udf_classloader.q.out b/ql/src/test/results/clientpositive/udf_classloader.q.out index 031d88a..31cb0d7 100644 --- a/ql/src/test/results/clientpositive/udf_classloader.q.out +++ b/ql/src/test/results/clientpositive/udf_classloader.q.out @@ -10,15 +10,11 @@ PREHOOK: Output: f2 POSTHOOK: query: CREATE TEMPORARY FUNCTION f2 AS 'hive.it.custom.udfs.UDF2' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: f2 -PREHOOK: query: -- udf-classloader-udf1.jar contains f1 which relies on udf-classloader-util.jar, --- similiary udf-classloader-udf2.jar contains f2 which also relies on udf-classloader-util.jar. -SELECT f1(*), f2(*) FROM SRC limit 1 +PREHOOK: query: SELECT f1(*), f2(*) FROM SRC limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- udf-classloader-udf1.jar contains f1 which relies on udf-classloader-util.jar, --- similiary udf-classloader-udf2.jar contains f2 which also relies on udf-classloader-util.jar. 
-SELECT f1(*), f2(*) FROM SRC limit 1 +POSTHOOK: query: SELECT f1(*), f2(*) FROM SRC limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_classloader_dynamic_dependency_resolution.q.out b/ql/src/test/results/clientpositive/udf_classloader_dynamic_dependency_resolution.q.out index 031d88a..31cb0d7 100644 --- a/ql/src/test/results/clientpositive/udf_classloader_dynamic_dependency_resolution.q.out +++ b/ql/src/test/results/clientpositive/udf_classloader_dynamic_dependency_resolution.q.out @@ -10,15 +10,11 @@ PREHOOK: Output: f2 POSTHOOK: query: CREATE TEMPORARY FUNCTION f2 AS 'hive.it.custom.udfs.UDF2' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: f2 -PREHOOK: query: -- udf-classloader-udf1.jar contains f1 which relies on udf-classloader-util.jar, --- similiary udf-classloader-udf2.jar contains f2 which also relies on udf-classloader-util.jar. -SELECT f1(*), f2(*) FROM SRC limit 1 +PREHOOK: query: SELECT f1(*), f2(*) FROM SRC limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- udf-classloader-udf1.jar contains f1 which relies on udf-classloader-util.jar, --- similiary udf-classloader-udf2.jar contains f2 which also relies on udf-classloader-util.jar. 
-SELECT f1(*), f2(*) FROM SRC limit 1 +POSTHOOK: query: SELECT f1(*), f2(*) FROM SRC limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_concat.q.out b/ql/src/test/results/clientpositive/udf_concat.q.out index 00b87b4..e811c59 100644 --- a/ql/src/test/results/clientpositive/udf_concat.q.out +++ b/ql/src/test/results/clientpositive/udf_concat.q.out @@ -45,16 +45,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### ab abc NULL NULL a NULL 123a 12 1 1234abcextra argument -PREHOOK: query: -- binary/mixed -SELECT +PREHOOK: query: SELECT concat(cast('ab' as binary), cast('cd' as binary)), concat('ab', cast('cd' as binary)) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- binary/mixed -SELECT +POSTHOOK: query: SELECT concat(cast('ab' as binary), cast('cd' as binary)), concat('ab', cast('cd' as binary)) FROM src tablesample (1 rows) diff --git a/ql/src/test/results/clientpositive/udf_concat_ws.q.out b/ql/src/test/results/clientpositive/udf_concat_ws.q.out index 09a2ed4..2c50b1e 100644 --- a/ql/src/test/results/clientpositive/udf_concat_ws.q.out +++ b/ql/src/test/results/clientpositive/udf_concat_ws.q.out @@ -76,8 +76,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@dest1 #### A masked pattern was here #### xyzabc8675309 abc,xyz,8675309 NULL abc**8675309 -PREHOOK: query: -- evalutes function for array of strings -EXPLAIN +PREHOOK: query: EXPLAIN SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'), concat_ws('-', 'www', array('face', 'book', 'com'), '1234'), concat_ws('F', 'www', array('face', 'book', 'com', '1234')), @@ -86,8 +85,7 @@ SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'), concat_ws('[]', array('www'), 'face', array('book', 'com', '1234')), concat_ws('AAA', array('www'), array('face', 'book', 'com'), '1234') 
FROM dest1 tablesample (1 rows) PREHOOK: type: QUERY -POSTHOOK: query: -- evalutes function for array of strings -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT concat_ws('.', array('www', 'face', 'book', 'com'), '1234'), concat_ws('-', 'www', array('face', 'book', 'com'), '1234'), concat_ws('F', 'www', array('face', 'book', 'com', '1234')), diff --git a/ql/src/test/results/clientpositive/udf_conv.q.out b/ql/src/test/results/clientpositive/udf_conv.q.out index 036bbfb..4c5ba75 100644 --- a/ql/src/test/results/clientpositive/udf_conv.q.out +++ b/ql/src/test/results/clientpositive/udf_conv.q.out @@ -16,10 +16,7 @@ Example: '16' Function class:org.apache.hadoop.hive.ql.udf.UDFConv Function type:BUILTIN -PREHOOK: query: -- conv must work on both strings and integers up to 64-bit precision - --- Some simple conversions to test different bases -SELECT +PREHOOK: query: SELECT conv('4521', 10, 36), conv('22', 10, 10), conv('110011', 2, 16), @@ -28,10 +25,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- conv must work on both strings and integers up to 64-bit precision - --- Some simple conversions to test different bases -SELECT +POSTHOOK: query: SELECT conv('4521', 10, 36), conv('22', 10, 10), conv('110011', 2, 16), @@ -41,9 +35,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 3HL 22 33 116ED2B2FB4 -PREHOOK: query: -- Test negative numbers. If to_base is positive, the number should be handled --- as a two's complement (64-bit) -SELECT +PREHOOK: query: SELECT conv('-641', 10, -10), conv('1011', 2, -16), conv('-1', 10, 16), @@ -52,9 +44,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test negative numbers. 
If to_base is positive, the number should be handled --- as a two's complement (64-bit) -SELECT +POSTHOOK: query: SELECT conv('-641', 10, -10), conv('1011', 2, -16), conv('-1', 10, 16), @@ -64,9 +54,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -641 B FFFFFFFFFFFFFFFF FFFFFFFFFFFFFFF1 -PREHOOK: query: -- Test overflow. If a number is two large, the result should be -1 (if signed) --- or MAX_LONG (if unsigned) -SELECT +PREHOOK: query: SELECT conv('9223372036854775807', 36, 16), conv('9223372036854775807', 36, -16), conv('-9223372036854775807', 36, 16), @@ -75,9 +63,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test overflow. If a number is two large, the result should be -1 (if signed) --- or MAX_LONG (if unsigned) -SELECT +POSTHOOK: query: SELECT conv('9223372036854775807', 36, 16), conv('9223372036854775807', 36, -16), conv('-9223372036854775807', 36, 16), @@ -87,10 +73,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### FFFFFFFFFFFFFFFF -1 FFFFFFFFFFFFFFFF -1 -PREHOOK: query: -- Test with invalid input. If one of the bases is invalid, the result should --- be NULL. If there is an invalid digit in the number, the longest valid --- prefix should be converted. -SELECT +PREHOOK: query: SELECT conv('123455', 3, 10), conv('131', 1, 5), conv('515', 5, 100), @@ -99,10 +82,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test with invalid input. If one of the bases is invalid, the result should --- be NULL. If there is an invalid digit in the number, the longest valid --- prefix should be converted. 
-SELECT +POSTHOOK: query: SELECT conv('123455', 3, 10), conv('131', 1, 5), conv('515', 5, 100), @@ -112,9 +92,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 5 NULL NULL NULL -PREHOOK: query: -- Perform the same tests with number arguments. - -SELECT +PREHOOK: query: SELECT conv(4521, 10, 36), conv(22, 10, 10), conv(110011, 2, 16) @@ -122,9 +100,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Perform the same tests with number arguments. - -SELECT +POSTHOOK: query: SELECT conv(4521, 10, 36), conv(22, 10, 10), conv(110011, 2, 16) @@ -190,17 +166,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 5 NULL NULL NULL -PREHOOK: query: -- Make sure that state is properly reset. - -SELECT conv(key, 10, 16), +PREHOOK: query: SELECT conv(key, 10, 16), conv(key, 16, 10) FROM src tablesample (3 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Make sure that state is properly reset. 
- -SELECT conv(key, 10, 16), +POSTHOOK: query: SELECT conv(key, 10, 16), conv(key, 16, 10) FROM src tablesample (3 rows) POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf_date_add.q.out b/ql/src/test/results/clientpositive/udf_date_add.q.out index 54d9022..fa662d1 100644 --- a/ql/src/test/results/clientpositive/udf_date_add.q.out +++ b/ql/src/test/results/clientpositive/udf_date_add.q.out @@ -14,15 +14,13 @@ Example: '2009-07-31' Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFDateAdd Function type:BUILTIN -PREHOOK: query: -- Test different numeric data types for date_add -SELECT date_add('1900-01-01', cast(10 as tinyint)), +PREHOOK: query: SELECT date_add('1900-01-01', cast(10 as tinyint)), date_add('1900-01-01', cast(10 as smallint)), date_add('1900-01-01', cast(10 as int)) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- Test different numeric data types for date_add -SELECT date_add('1900-01-01', cast(10 as tinyint)), +POSTHOOK: query: SELECT date_add('1900-01-01', cast(10 as tinyint)), date_add('1900-01-01', cast(10 as smallint)), date_add('1900-01-01', cast(10 as int)) POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf_date_format.q.out b/ql/src/test/results/clientpositive/udf_date_format.q.out index 08eb469..e88fff9 100644 --- a/ql/src/test/results/clientpositive/udf_date_format.q.out +++ b/ql/src/test/results/clientpositive/udf_date_format.q.out @@ -35,8 +35,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 93 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: --string date -select +PREHOOK: query: select date_format('2015-04-08', 'E'), date_format('2015-04-08', 'G'), date_format('2015-04-08', 'y'), @@ -51,8 +50,7 @@ date_format('01/29/2014', 'dd') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --string date -select +POSTHOOK: 
query: select date_format('2015-04-08', 'E'), date_format('2015-04-08', 'G'), date_format('2015-04-08', 'y'), @@ -68,8 +66,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### Wed AD 2015 2015 Apr 15 2 98 8 NULL NULL -PREHOOK: query: --string timestamp -select +PREHOOK: query: select date_format('2015-04-08 10:30:45', 'HH'), date_format('2015-04-08 10:30:45', 'mm'), date_format('2015-04-08 10:30:45', 'ss'), @@ -83,8 +80,7 @@ date_format('04/08/2015 10:30:45', 'dd') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --string timestamp -select +POSTHOOK: query: select date_format('2015-04-08 10:30:45', 'HH'), date_format('2015-04-08 10:30:45', 'mm'), date_format('2015-04-08 10:30:45', 'ss'), @@ -99,8 +95,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 10 30 45 09 PM 08 123 08 08 NULL NULL -PREHOOK: query: --date -select +PREHOOK: query: select date_format(cast('2015-04-08' as date), 'EEEE'), date_format(cast('2015-04-08' as date), 'G'), date_format(cast('2015-04-08' as date), 'yyyy'), @@ -114,8 +109,7 @@ date_format(cast(null as date), 'dd') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --date -select +POSTHOOK: query: select date_format(cast('2015-04-08' as date), 'EEEE'), date_format(cast('2015-04-08' as date), 'G'), date_format(cast('2015-04-08' as date), 'yyyy'), @@ -130,8 +124,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### Wednesday AD 2015 15 Apr 15 2 98 8 NULL -PREHOOK: query: --timestamp -select +PREHOOK: query: select date_format(cast('2015-04-08 10:30:45' as timestamp), 'HH'), date_format(cast('2015-04-08 10:30:45' as timestamp), 'mm'), date_format(cast('2015-04-08 10:30:45' as timestamp), 'ss'), @@ -143,8 +136,7 @@ date_format(cast(null as 
timestamp), 'HH') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --timestamp -select +POSTHOOK: query: select date_format(cast('2015-04-08 10:30:45' as timestamp), 'HH'), date_format(cast('2015-04-08 10:30:45' as timestamp), 'mm'), date_format(cast('2015-04-08 10:30:45' as timestamp), 'ss'), @@ -157,15 +149,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 10 30 45 10 AM 08 123 123 NULL -PREHOOK: query: -- wrong fmt -select +PREHOOK: query: select date_format('2015-04-08', ''), date_format('2015-04-08', 'Q') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- wrong fmt -select +POSTHOOK: query: select date_format('2015-04-08', ''), date_format('2015-04-08', 'Q') POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf_date_sub.q.out b/ql/src/test/results/clientpositive/udf_date_sub.q.out index ac3b4aa..6bc9f48 100644 --- a/ql/src/test/results/clientpositive/udf_date_sub.q.out +++ b/ql/src/test/results/clientpositive/udf_date_sub.q.out @@ -14,15 +14,13 @@ Example: '2009-07-29' Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFDateSub Function type:BUILTIN -PREHOOK: query: -- Test different numeric data types for date_add -SELECT date_sub('1900-01-01', cast(10 as tinyint)), +PREHOOK: query: SELECT date_sub('1900-01-01', cast(10 as tinyint)), date_sub('1900-01-01', cast(10 as smallint)), date_sub('1900-01-01', cast(10 as int)) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- Test different numeric data types for date_add -SELECT date_sub('1900-01-01', cast(10 as tinyint)), +POSTHOOK: query: SELECT date_sub('1900-01-01', cast(10 as tinyint)), date_sub('1900-01-01', cast(10 as smallint)), date_sub('1900-01-01', cast(10 as int)) POSTHOOK: type: QUERY diff --git 
a/ql/src/test/results/clientpositive/udf_format_number.q.out b/ql/src/test/results/clientpositive/udf_format_number.q.out index 602f78b..0621875 100644 --- a/ql/src/test/results/clientpositive/udf_format_number.q.out +++ b/ql/src/test/results/clientpositive/udf_format_number.q.out @@ -4,13 +4,9 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Test format_number() UDF - -DESCRIBE FUNCTION format_number +PREHOOK: query: DESCRIBE FUNCTION format_number PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- Test format_number() UDF - -DESCRIBE FUNCTION format_number +POSTHOOK: query: DESCRIBE FUNCTION format_number POSTHOOK: type: DESCFUNCTION format_number(X, D or F) - Formats the number X to a format like '#,###,###.##', rounded to D decimal places, Or Uses the format specified F to format, and returns the result as a string. If D is 0, the result has no decimal point or fractional part. This is supposed to function like MySQL's FORMAT PREHOOK: query: DESCRIBE FUNCTION EXTENDED format_number @@ -74,8 +70,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 12,332.1235 12,332.1000 12,332 12332.2 -PREHOOK: query: -- positive numbers -SELECT format_number(0.123456789, 12), +PREHOOK: query: SELECT format_number(0.123456789, 12), format_number(12345678.123456789, 5), format_number(1234567.123456789, 7), format_number(123456.123456789, 0), @@ -84,8 +79,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- positive numbers -SELECT format_number(0.123456789, 12), +POSTHOOK: query: SELECT format_number(0.123456789, 12), format_number(12345678.123456789, 5), format_number(1234567.123456789, 7), format_number(123456.123456789, 0), @@ -95,8 +89,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 0.123456789000 
12,345,678.12346 1,234,567.1234568 123,456 123456.123 -PREHOOK: query: -- negative numbers -SELECT format_number(-123456.123456789, 0), +PREHOOK: query: SELECT format_number(-123456.123456789, 0), format_number(-1234567.123456789, 2), format_number(-0.123456789, 15), format_number(-0.123456789, '##################.###'), @@ -106,8 +99,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- negative numbers -SELECT format_number(-123456.123456789, 0), +POSTHOOK: query: SELECT format_number(-123456.123456789, 0), format_number(-1234567.123456789, 2), format_number(-0.123456789, 15), format_number(-0.123456789, '##################.###'), @@ -118,8 +110,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -123,456 -1,234,567.12 -0.123456789000000 -0.123 -12,345.1235 -12345.123 -PREHOOK: query: -- zeros -SELECT format_number(0.0, 4), +PREHOOK: query: SELECT format_number(0.0, 4), format_number(0.000000, 1), format_number(000.0000, 1), format_number(00000.0000, 1), @@ -130,8 +121,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- zeros -SELECT format_number(0.0, 4), +POSTHOOK: query: SELECT format_number(0.0, 4), format_number(0.000000, 1), format_number(000.0000, 1), format_number(00000.0000, 1), @@ -143,8 +133,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 0.0000 0.0 0.0 0.0 0 0.0000 0 -PREHOOK: query: -- integers -SELECT format_number(0, 0), +PREHOOK: query: SELECT format_number(0, 0), format_number(1, 4), format_number(12, 2), format_number(123, 5), @@ -154,8 +143,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- integers -SELECT format_number(0, 0), +POSTHOOK: query: SELECT format_number(0, 0), format_number(1, 4), 
format_number(12, 2), format_number(123, 5), @@ -166,13 +154,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 0 1.0000 12.00 123.00000 1,234.0000000 1234 -PREHOOK: query: -- long and double boundary --- 9223372036854775807 is LONG_MAX --- -9223372036854775807 is one more than LONG_MIN, --- due to HIVE-2733, put it here to check LONG_MIN boundary --- 4.9E-324 and 1.7976931348623157E308 are Double.MIN_VALUE and Double.MAX_VALUE --- check them for Double boundary -SELECT format_number(-9223372036854775807, 10), +PREHOOK: query: SELECT format_number(-9223372036854775807, 10), format_number(9223372036854775807, 20), format_number(4.9E-324, 324), format_number(1.7976931348623157E308, 308) @@ -180,13 +162,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- long and double boundary --- 9223372036854775807 is LONG_MAX --- -9223372036854775807 is one more than LONG_MIN, --- due to HIVE-2733, put it here to check LONG_MIN boundary --- 4.9E-324 and 1.7976931348623157E308 are Double.MIN_VALUE and Double.MAX_VALUE --- check them for Double boundary -SELECT format_number(-9223372036854775807, 10), +POSTHOOK: query: SELECT format_number(-9223372036854775807, 10), format_number(9223372036854775807, 20), format_number(4.9E-324, 324), format_number(1.7976931348623157E308, 308) @@ -195,8 +171,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -9,223,372,036,854,775,807.0000000000 9,223,372,036,854,775,807.00000000000000000000 0.000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000005 
179,769,313,486,231,570,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000,000.00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 -PREHOOK: query: -- floats -SELECT format_number(CAST(12332.123456 AS FLOAT), 4), +PREHOOK: query: SELECT format_number(CAST(12332.123456 AS FLOAT), 4), format_number(CAST(12332.1 AS FLOAT), 4), format_number(CAST(-12332.2 AS FLOAT), 0), format_number(CAST(-12332.2 AS FLOAT), '##################.###') @@ -204,8 +179,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- floats -SELECT format_number(CAST(12332.123456 AS FLOAT), 4), +POSTHOOK: query: SELECT format_number(CAST(12332.123456 AS FLOAT), 4), format_number(CAST(12332.1 AS FLOAT), 4), format_number(CAST(-12332.2 AS FLOAT), 0), format_number(CAST(-12332.2 AS FLOAT), '##################.###') @@ -214,8 +188,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 12,332.1230 12,332.0996 -12,332 -12332.2 -PREHOOK: query: -- decimals -SELECT format_number(12332.123456BD, 4), +PREHOOK: query: SELECT format_number(12332.123456BD, 4), format_number(12332.123456BD, 2), format_number(12332.1BD, 4), format_number(-12332.2BD, 0), @@ -225,8 +198,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- decimals 
-SELECT format_number(12332.123456BD, 4), +POSTHOOK: query: SELECT format_number(12332.123456BD, 4), format_number(12332.123456BD, 2), format_number(12332.1BD, 4), format_number(-12332.2BD, 0), @@ -237,16 +209,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 12,332.1235 12,332.12 12,332.1000 -12,332 12,332.6000 12332.1 -PREHOOK: query: -- nulls -SELECT +PREHOOK: query: SELECT format_number(cast(null as int), 0), format_number(12332.123456BD, cast(null as int)), format_number(cast(null as int), cast(null as int)) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- nulls -SELECT +POSTHOOK: query: SELECT format_number(cast(null as int), 0), format_number(12332.123456BD, cast(null as int)), format_number(cast(null as int), cast(null as int)) @@ -254,8 +224,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### NULL NULL NULL -PREHOOK: query: -- format number with format string passed -SELECT format_number(-9223372036854775807, '##################.###'), +PREHOOK: query: SELECT format_number(-9223372036854775807, '##################.###'), format_number(9223372036854775807, '##################.###'), format_number(4.9E-324, '##################.###'), format_number(1.7976931348623157E308, '##################.###'), @@ -264,8 +233,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- format number with format string passed -SELECT format_number(-9223372036854775807, '##################.###'), +POSTHOOK: query: SELECT format_number(-9223372036854775807, '##################.###'), format_number(9223372036854775807, '##################.###'), format_number(4.9E-324, '##################.###'), format_number(1.7976931348623157E308, '##################.###'), diff --git 
a/ql/src/test/results/clientpositive/udf_get_json_object.q.out b/ql/src/test/results/clientpositive/udf_get_json_object.q.out index 0ce9608..b23b29c 100644 --- a/ql/src/test/results/clientpositive/udf_get_json_object.q.out +++ b/ql/src/test/results/clientpositive/udf_get_json_object.q.out @@ -152,15 +152,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src_json #### A masked pattern was here #### 1234 -PREHOOK: query: -- Verify that get_json_object can handle new lines in JSON values - -CREATE TABLE dest2(c1 STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE dest2(c1 STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest2 -POSTHOOK: query: -- Verify that get_json_object can handle new lines in JSON values - -CREATE TABLE dest2(c1 STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE dest2(c1 STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest2 @@ -193,8 +189,7 @@ POSTHOOK: Input: default@dest2 #### A masked pattern was here #### b c -PREHOOK: query: --root is array -SELECT +PREHOOK: query: SELECT get_json_object('[1,2,3]', '$[0]'), get_json_object('[1,2,3]', '$.[0]'), get_json_object('[1,2,3]', '$.[1]'), @@ -210,8 +205,7 @@ get_json_object('[{"k1":[{"k11":[1,2,3]}]}]', '$[0].k1[0].k11[1]') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --root is array -SELECT +POSTHOOK: query: SELECT get_json_object('[1,2,3]', '$[0]'), get_json_object('[1,2,3]', '$.[0]'), get_json_object('[1,2,3]', '$.[1]'), @@ -228,16 +222,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1 1 2 2 3 [1,2,3] [1,2,3] {"k3":"v3"} v3 [4,5,6] 4 2 -PREHOOK: query: --null -SELECT +PREHOOK: query: SELECT get_json_object('[1,2,3]', '[2]'), get_json_object('[1,2,3]', '$0'), get_json_object('[1,2,3]', '$[3]') PREHOOK: type: QUERY PREHOOK: 
Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --null -SELECT +POSTHOOK: query: SELECT get_json_object('[1,2,3]', '[2]'), get_json_object('[1,2,3]', '$0'), get_json_object('[1,2,3]', '$[3]') diff --git a/ql/src/test/results/clientpositive/udf_hex.q.out b/ql/src/test/results/clientpositive/udf_hex.q.out index 87b6e11..9225085 100644 --- a/ql/src/test/results/clientpositive/udf_hex.q.out +++ b/ql/src/test/results/clientpositive/udf_hex.q.out @@ -17,9 +17,7 @@ Example: '46616365626F6F6B' Function class:org.apache.hadoop.hive.ql.udf.UDFHex Function type:BUILTIN -PREHOOK: query: -- If the argument is a string, hex should return a string containing two hex --- digits for every character in the input. -SELECT +PREHOOK: query: SELECT hex('Facebook'), hex('\0'), hex('qwertyuiopasdfghjkl') @@ -27,9 +25,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- If the argument is a string, hex should return a string containing two hex --- digits for every character in the input. -SELECT +POSTHOOK: query: SELECT hex('Facebook'), hex('\0'), hex('qwertyuiopasdfghjkl') @@ -38,8 +34,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 46616365626F6F6B 00 71776572747975696F706173646667686A6B6C -PREHOOK: query: -- If the argument is a number, hex should convert it to hexadecimal. -SELECT +PREHOOK: query: SELECT hex(1), hex(0), hex(4207849477) @@ -47,8 +42,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- If the argument is a number, hex should convert it to hexadecimal. 
-SELECT +POSTHOOK: query: SELECT hex(1), hex(0), hex(4207849477) @@ -57,13 +51,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 1 0 FACEB005 -PREHOOK: query: -- Negative numbers should be treated as two's complement (64 bit). -SELECT hex(-5) FROM src tablesample (1 rows) +PREHOOK: query: SELECT hex(-5) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Negative numbers should be treated as two's complement (64 bit). -SELECT hex(-5) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT hex(-5) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_if.q.out b/ql/src/test/results/clientpositive/udf_if.q.out index 5364509..eac4fe3 100644 --- a/ql/src/test/results/clientpositive/udf_if.q.out +++ b/ql/src/test/results/clientpositive/udf_if.q.out @@ -67,16 +67,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 1 1 1 1 NULL 2 -PREHOOK: query: -- Type conversions -EXPLAIN +PREHOOK: query: EXPLAIN SELECT IF(TRUE, CAST(128 AS SMALLINT), CAST(1 AS TINYINT)) AS COL1, IF(FALSE, 1, 1.1) AS COL2, IF(FALSE, 1, 'ABC') AS COL3, IF(FALSE, 'ABC', 12.3) AS COL4 FROM src tablesample (1 rows) PREHOOK: type: QUERY -POSTHOOK: query: -- Type conversions -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT IF(TRUE, CAST(128 AS SMALLINT), CAST(1 AS TINYINT)) AS COL1, IF(FALSE, 1, 1.1) AS COL2, IF(FALSE, 1, 'ABC') AS COL3, diff --git a/ql/src/test/results/clientpositive/udf_inline.q.out b/ql/src/test/results/clientpositive/udf_inline.q.out index dca41d9..cceee0e 100644 --- a/ql/src/test/results/clientpositive/udf_inline.q.out +++ b/ql/src/test/results/clientpositive/udf_inline.q.out @@ -68,8 +68,7 @@ POSTHOOK: Input: default@src #### A masked pattern was here #### 1 dude! 
2 Wheres -PREHOOK: query: -- HIVE-3475 INLINE UDTF doesn't convert types properly -select * from (SELECT +PREHOOK: query: select * from (SELECT ARRAY( STRUCT (1,'dude!'), STRUCT (2,'Wheres'), @@ -79,8 +78,7 @@ select * from (SELECT PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- HIVE-3475 INLINE UDTF doesn't convert types properly -select * from (SELECT +POSTHOOK: query: select * from (SELECT ARRAY( STRUCT (1,'dude!'), STRUCT (2,'Wheres'), diff --git a/ql/src/test/results/clientpositive/udf_java_method.q.out b/ql/src/test/results/clientpositive/udf_java_method.q.out index 0f42524..f2f146d 100644 --- a/ql/src/test/results/clientpositive/udf_java_method.q.out +++ b/ql/src/test/results/clientpositive/udf_java_method.q.out @@ -13,9 +13,7 @@ Use this UDF to call Java methods by matching the argument signature Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFReflect Function type:BUILTIN -PREHOOK: query: -- java_method() is a synonym for reflect() - -EXPLAIN EXTENDED +PREHOOK: query: EXPLAIN EXTENDED SELECT java_method("java.lang.String", "valueOf", 1), java_method("java.lang.String", "isEmpty"), java_method("java.lang.Math", "max", 2, 3), @@ -25,9 +23,7 @@ SELECT java_method("java.lang.String", "valueOf", 1), java_method("java.lang.Math", "floor", 1.9D) FROM src tablesample (1 rows) PREHOOK: type: QUERY -POSTHOOK: query: -- java_method() is a synonym for reflect() - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT java_method("java.lang.String", "valueOf", 1), java_method("java.lang.String", "isEmpty"), java_method("java.lang.Math", "max", 2, 3), diff --git a/ql/src/test/results/clientpositive/udf_length.q.out b/ql/src/test/results/clientpositive/udf_length.q.out index 461e129..07d2049 100644 --- a/ql/src/test/results/clientpositive/udf_length.q.out +++ b/ql/src/test/results/clientpositive/udf_length.q.out @@ -157,13 +157,11 @@ POSTHOOK: query: DROP TABLE dest1 POSTHOOK: type: DROPTABLE 
POSTHOOK: Input: default@dest1 POSTHOOK: Output: default@dest1 -PREHOOK: query: -- Test with non-ascii characters. -CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- Test with non-ascii characters. -CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/udf_map_keys.q.out b/ql/src/test/results/clientpositive/udf_map_keys.q.out index d8f90fd..e31b6c9 100644 --- a/ql/src/test/results/clientpositive/udf_map_keys.q.out +++ b/ql/src/test/results/clientpositive/udf_map_keys.q.out @@ -4,13 +4,9 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Test map_keys() UDF - -DESCRIBE FUNCTION map_keys +PREHOOK: query: DESCRIBE FUNCTION map_keys PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- Test map_keys() UDF - -DESCRIBE FUNCTION map_keys +POSTHOOK: query: DESCRIBE FUNCTION map_keys POSTHOOK: type: DESCFUNCTION map_keys(map) - Returns an unordered array containing the keys of the input map. PREHOOK: query: DESCRIBE FUNCTION EXTENDED map_keys @@ -20,24 +16,20 @@ POSTHOOK: type: DESCFUNCTION map_keys(map) - Returns an unordered array containing the keys of the input map. 
Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFMapKeys Function type:BUILTIN -PREHOOK: query: -- Evaluate function against INT valued keys -SELECT map_keys(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows) +PREHOOK: query: SELECT map_keys(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against INT valued keys -SELECT map_keys(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT map_keys(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### [1,2,3] -PREHOOK: query: -- Evaluate function against STRING valued keys -SELECT map_keys(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows) +PREHOOK: query: SELECT map_keys(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against STRING valued keys -SELECT map_keys(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT map_keys(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_map_values.q.out b/ql/src/test/results/clientpositive/udf_map_values.q.out index 905b237..fda1229 100644 --- a/ql/src/test/results/clientpositive/udf_map_values.q.out +++ b/ql/src/test/results/clientpositive/udf_map_values.q.out @@ -4,13 +4,9 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Test map_values() UDF - -DESCRIBE FUNCTION map_values +PREHOOK: query: DESCRIBE FUNCTION map_values PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- Test map_values() UDF - 
-DESCRIBE FUNCTION map_values +POSTHOOK: query: DESCRIBE FUNCTION map_values POSTHOOK: type: DESCFUNCTION map_values(map) - Returns an unordered array containing the values of the input map. PREHOOK: query: DESCRIBE FUNCTION EXTENDED map_values @@ -20,24 +16,20 @@ POSTHOOK: type: DESCFUNCTION map_values(map) - Returns an unordered array containing the values of the input map. Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFMapValues Function type:BUILTIN -PREHOOK: query: -- Evaluate function against STRING valued values -SELECT map_values(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows) +PREHOOK: query: SELECT map_values(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against STRING valued values -SELECT map_values(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT map_values(map(1, "a", 2, "b", 3, "c")) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### ["a","b","c"] -PREHOOK: query: -- Evaluate function against INT valued keys -SELECT map_values(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows) +PREHOOK: query: SELECT map_values(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against INT valued keys -SELECT map_values(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT map_values(map("a", 1, "b", 2, "c", 3)) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_months_between.q.out b/ql/src/test/results/clientpositive/udf_months_between.q.out index 913cd35..1206cf1 100644 --- 
a/ql/src/test/results/clientpositive/udf_months_between.q.out +++ b/ql/src/test/results/clientpositive/udf_months_between.q.out @@ -15,11 +15,9 @@ date1 and date2 type can be date, timestamp or string in the format 'yyyy-MM-dd' 3.94959677 Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFMonthsBetween Function type:BUILTIN -PREHOOK: query: --test string format -explain select months_between('1995-02-02', '1995-01-01') +PREHOOK: query: explain select months_between('1995-02-02', '1995-01-01') PREHOOK: type: QUERY -POSTHOOK: query: --test string format -explain select months_between('1995-02-02', '1995-01-01') +POSTHOOK: query: explain select months_between('1995-02-02', '1995-01-01') POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -106,8 +104,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1.03225806 -23.64516129 13.0 -49.0 -0.12903226 0.06451613 -0.09677419 0.03225806 3.7E-7 3.7E-7 3.94959677 1.0 1.0 1.0 1.0 1.0 1.0 1.03225806 1.03225806 1.03225806 1.03225806 1.03225806 1.03225806 1.03225806 -PREHOOK: query: --test timestamp format -select +PREHOOK: query: select months_between(cast('1995-02-02 00:00:00' as timestamp), cast('1995-01-01 00:00:00' as timestamp)), months_between(cast('2003-07-17 00:00:00' as timestamp), cast('2005-07-06 00:00:00' as timestamp)), months_between(cast('2001-06-30 00:00:00' as timestamp), cast('2000-05-31 00:00:00' as timestamp)), @@ -130,8 +127,7 @@ select PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --test timestamp format -select +POSTHOOK: query: select months_between(cast('1995-02-02 00:00:00' as timestamp), cast('1995-01-01 00:00:00' as timestamp)), months_between(cast('2003-07-17 00:00:00' as timestamp), cast('2005-07-06 00:00:00' as timestamp)), months_between(cast('2001-06-30 00:00:00' as timestamp), cast('2000-05-31 00:00:00' as timestamp)), @@ -155,8 +151,7 @@ 
POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1.03225806 -23.64516129 13.0 -49.0 -0.12903226 0.06451613 -0.09677419 0.03225806 3.7E-7 3.7E-7 3.94959677 1.0 1.0 1.0 1.0 1.0 1.0 -PREHOOK: query: --test date format -select +PREHOOK: query: select months_between(cast('1995-02-02' as date), cast('1995-01-01' as date)), months_between(cast('2003-07-17' as date), cast('2005-07-06' as date)), months_between(cast('2001-06-30' as date), cast('2000-05-31' as date)), @@ -168,8 +163,7 @@ select PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --test date format -select +POSTHOOK: query: select months_between(cast('1995-02-02' as date), cast('1995-01-01' as date)), months_between(cast('2003-07-17' as date), cast('2005-07-06' as date)), months_between(cast('2001-06-30' as date), cast('2000-05-31' as date)), @@ -182,8 +176,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1.03225806 -23.64516129 13.0 -49.0 -0.12903226 0.06451613 -0.09677419 0.03225806 -PREHOOK: query: --test misc with null -select +PREHOOK: query: select months_between(cast(null as string), '2012-03-01'), months_between('2012-02-31', cast(null as timestamp)), months_between(cast(null as timestamp), cast(null as date)), @@ -203,8 +196,7 @@ select PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --test misc with null -select +POSTHOOK: query: select months_between(cast(null as string), '2012-03-01'), months_between('2012-02-31', cast(null as timestamp)), months_between(cast(null as timestamp), cast(null as date)), diff --git a/ql/src/test/results/clientpositive/udf_negative.q.out b/ql/src/test/results/clientpositive/udf_negative.q.out index e26dbf8..ea8e270 100644 --- a/ql/src/test/results/clientpositive/udf_negative.q.out +++ 
b/ql/src/test/results/clientpositive/udf_negative.q.out @@ -10,11 +10,9 @@ POSTHOOK: type: DESCFUNCTION negative a - Returns -a Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNegative Function type:BUILTIN -PREHOOK: query: -- synonym -DESCRIBE FUNCTION - +PREHOOK: query: DESCRIBE FUNCTION - PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- synonym -DESCRIBE FUNCTION - +POSTHOOK: query: DESCRIBE FUNCTION - POSTHOOK: type: DESCFUNCTION a - b - Returns the difference a-b PREHOOK: query: DESCRIBE FUNCTION EXTENDED - diff --git a/ql/src/test/results/clientpositive/udf_not.q.out b/ql/src/test/results/clientpositive/udf_not.q.out index 134640e..f0f392a 100644 --- a/ql/src/test/results/clientpositive/udf_not.q.out +++ b/ql/src/test/results/clientpositive/udf_not.q.out @@ -11,11 +11,9 @@ not a - Logical not Synonyms: ! Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNot Function type:BUILTIN -PREHOOK: query: -- synonym -DESCRIBE FUNCTION ! +PREHOOK: query: DESCRIBE FUNCTION ! PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- synonym -DESCRIBE FUNCTION ! +POSTHOOK: query: DESCRIBE FUNCTION ! POSTHOOK: type: DESCFUNCTION ! a - Logical not PREHOOK: query: DESCRIBE FUNCTION EXTENDED ! 
diff --git a/ql/src/test/results/clientpositive/udf_percentile.q.out b/ql/src/test/results/clientpositive/udf_percentile.q.out index 3f8890b..e3033e7 100644 --- a/ql/src/test/results/clientpositive/udf_percentile.q.out +++ b/ql/src/test/results/clientpositive/udf_percentile.q.out @@ -10,9 +10,7 @@ POSTHOOK: type: DESCFUNCTION percentile(expr, pc) - Returns the percentile(s) of expr at pc (range: [0,1]).pc can be a double or double array Function class:org.apache.hadoop.hive.ql.udf.UDAFPercentile Function type:BUILTIN -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(CAST(substr(value, 5) AS INT), 0.0), percentile(CAST(substr(value, 5) AS INT), 0.5), percentile(CAST(substr(value, 5) AS INT), 1.0), @@ -22,9 +20,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(CAST(substr(value, 5) AS INT), 0.0), percentile(CAST(substr(value, 5) AS INT), 0.5), percentile(CAST(substr(value, 5) AS INT), 1.0), @@ -294,8 +290,7 @@ POSTHOOK: Input: default@src 7 70.0 73.0 78.0 [70.0,73.0,77.91000000000001,78.0] 8 80.0 84.0 87.0 [80.0,84.0,86.92,87.0] 9 90.0 95.0 98.0 [90.0,95.0,98.0,98.0] -PREHOOK: query: -- test null handling -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(NULL, 0.0), percentile(NULL, array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -303,8 +298,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test null handling -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(NULL, 0.0), percentile(NULL, array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -362,8 +356,7 @@ POSTHOOK: Input: default@src 7 NULL NULL 8 NULL NULL 9 NULL NULL 
-PREHOOK: query: -- test empty array handling -SELECT CAST(key AS INT) DIV 10, +PREHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5), percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -371,8 +364,7 @@ GROUP BY CAST(key AS INT) DIV 10 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test empty array handling -SELECT CAST(key AS INT) DIV 10, +POSTHOOK: query: SELECT CAST(key AS INT) DIV 10, percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), 0.5), percentile(IF(CAST(key AS INT) DIV 10 < 5, 1, NULL), array(0.0, 0.5, 0.99, 1.0)) FROM src @@ -439,13 +431,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### NULL -PREHOOK: query: -- test where percentile list is empty -select percentile(cast(key as bigint), array()) from src where false +PREHOOK: query: select percentile(cast(key as bigint), array()) from src where false PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test where percentile list is empty -select percentile(cast(key as bigint), array()) from src where false +POSTHOOK: query: select percentile(cast(key as bigint), array()) from src where false POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_positive.q.out b/ql/src/test/results/clientpositive/udf_positive.q.out index 09c0c1f..f762315 100644 --- a/ql/src/test/results/clientpositive/udf_positive.q.out +++ b/ql/src/test/results/clientpositive/udf_positive.q.out @@ -10,11 +10,9 @@ POSTHOOK: type: DESCFUNCTION positive a - Returns a Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPositive Function type:BUILTIN -PREHOOK: query: -- synonym -DESCRIBE FUNCTION + +PREHOOK: query: DESCRIBE FUNCTION + PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- synonym -DESCRIBE 
FUNCTION + +POSTHOOK: query: DESCRIBE FUNCTION + POSTHOOK: type: DESCFUNCTION a + b - Returns a+b PREHOOK: query: DESCRIBE FUNCTION EXTENDED + diff --git a/ql/src/test/results/clientpositive/udf_printf.q.out b/ql/src/test/results/clientpositive/udf_printf.q.out index 47bc071..732585a 100644 --- a/ql/src/test/results/clientpositive/udf_printf.q.out +++ b/ql/src/test/results/clientpositive/udf_printf.q.out @@ -4,13 +4,9 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Test printf() UDF - -DESCRIBE FUNCTION printf +PREHOOK: query: DESCRIBE FUNCTION printf PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- Test printf() UDF - -DESCRIBE FUNCTION printf +POSTHOOK: query: DESCRIBE FUNCTION printf POSTHOOK: type: DESCFUNCTION printf(String format, Obj... args) - function that can format strings according to printf-style format strings PREHOOK: query: DESCRIBE FUNCTION EXTENDED printf @@ -47,13 +43,11 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 52000 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- Test Primitive Types -SELECT printf("Hello World %d %s", 100, "days") FROM src tablesample (1 rows) +PREHOOK: query: SELECT printf("Hello World %d %s", 100, "days") FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test Primitive Types -SELECT printf("Hello World %d %s", 100, "days") FROM src tablesample (1 rows) +POSTHOOK: query: SELECT printf("Hello World %d %s", 100, "days") FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -67,25 +61,21 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### All Type Test: false, A, 15000, 1.234000e+01, +27183.2401, 2300.41, 32, corret, 0x1.002p8 -PREHOOK: query: -- Test NULL Values -SELECT printf("Color %s, String Null: %s, 
number1 %d, number2 %05d, Integer Null: %d, hex %#x, float %5.2f Double Null: %f\n", "red", NULL, 123456, 89, NULL, 255, 3.14159, NULL) FROM src tablesample (1 rows) +PREHOOK: query: SELECT printf("Color %s, String Null: %s, number1 %d, number2 %05d, Integer Null: %d, hex %#x, float %5.2f Double Null: %f\n", "red", NULL, 123456, 89, NULL, 255, 3.14159, NULL) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test NULL Values -SELECT printf("Color %s, String Null: %s, number1 %d, number2 %05d, Integer Null: %d, hex %#x, float %5.2f Double Null: %f\n", "red", NULL, 123456, 89, NULL, 255, 3.14159, NULL) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT printf("Color %s, String Null: %s, number1 %d, number2 %05d, Integer Null: %d, hex %#x, float %5.2f Double Null: %f\n", "red", NULL, 123456, 89, NULL, 255, 3.14159, NULL) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### Color red, String Null: null, number1 123456, number2 00089, Integer Null: null, hex 0xff, float 3.14 Double Null: null -PREHOOK: query: -- Test Timestamp -create table timestamp_udf (t timestamp) +PREHOOK: query: create table timestamp_udf (t timestamp) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@timestamp_udf -POSTHOOK: query: -- Test Timestamp -create table timestamp_udf (t timestamp) +POSTHOOK: query: create table timestamp_udf (t timestamp) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@timestamp_udf @@ -119,16 +109,14 @@ POSTHOOK: query: drop table timestamp_udf POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@timestamp_udf POSTHOOK: Output: default@timestamp_udf -PREHOOK: query: -- Test Binary -CREATE TABLE binay_udf(key binary, value int) +PREHOOK: query: CREATE TABLE binay_udf(key binary, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '9' STORED AS 
TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@binay_udf -POSTHOOK: query: -- Test Binary -CREATE TABLE binay_udf(key binary, value int) +POSTHOOK: query: CREATE TABLE binay_udf(key binary, value int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '9' STORED AS TEXTFILE diff --git a/ql/src/test/results/clientpositive/udf_quarter.q.out b/ql/src/test/results/clientpositive/udf_quarter.q.out index 47304e3..5e82f96 100644 --- a/ql/src/test/results/clientpositive/udf_quarter.q.out +++ b/ql/src/test/results/clientpositive/udf_quarter.q.out @@ -34,8 +34,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE ListSink -PREHOOK: query: -- string date -select +PREHOOK: query: select quarter('2014-01-10'), quarter('2014-02-10'), quarter('2014-03-31'), @@ -48,13 +47,13 @@ quarter('2016-09-29'), quarter('2016-10-29'), quarter('2016-11-29'), quarter('2016-12-29'), --- wrong date str + quarter('2016-03-35'), quarter('2014-01-32'), quarter('01/14/2014'), --- null string + quarter(cast(null as string)), --- negative Unix time + quarter('1966-01-01'), quarter('1966-03-31'), quarter('1966-04-01'), @@ -62,8 +61,7 @@ quarter('1966-12-31') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- string date -select +POSTHOOK: query: select quarter('2014-01-10'), quarter('2014-02-10'), quarter('2014-03-31'), @@ -76,13 +74,13 @@ quarter('2016-09-29'), quarter('2016-10-29'), quarter('2016-11-29'), quarter('2016-12-29'), --- wrong date str + quarter('2016-03-35'), quarter('2014-01-32'), quarter('01/14/2014'), --- null string + quarter(cast(null as string)), --- negative Unix time + quarter('1966-01-01'), quarter('1966-03-31'), quarter('1966-04-01'), @@ -91,8 +89,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1 1 1 2 2 2 3 3 3 4 4 4 2 1 NULL NULL 1 1 2 4 -PREHOOK: query: -- string 
timestamp -select +PREHOOK: query: select quarter('2014-01-10 00:00:00'), quarter('2014-02-10 15:23:00'), quarter('2014-03-31 15:23:00'), @@ -105,13 +102,13 @@ quarter('2016-09-29 15:23:00'), quarter('2016-10-29 15:23:00'), quarter('2016-11-29 15:23:00'), quarter('2016-12-29 15:23:00'), --- wrong date str + quarter('2016-03-35 15:23:00'), quarter('2014-01-32 15:23:00'), quarter('01/14/2014 15:23:00'), --- null VOID type + quarter(null), --- negative Unix time + quarter('1966-01-01 00:00:00'), quarter('1966-03-31 23:59:59.999'), quarter('1966-04-01 00:00:00'), @@ -119,8 +116,7 @@ quarter('1966-12-31 23:59:59.999') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- string timestamp -select +POSTHOOK: query: select quarter('2014-01-10 00:00:00'), quarter('2014-02-10 15:23:00'), quarter('2014-03-31 15:23:00'), @@ -133,13 +129,13 @@ quarter('2016-09-29 15:23:00'), quarter('2016-10-29 15:23:00'), quarter('2016-11-29 15:23:00'), quarter('2016-12-29 15:23:00'), --- wrong date str + quarter('2016-03-35 15:23:00'), quarter('2014-01-32 15:23:00'), quarter('01/14/2014 15:23:00'), --- null VOID type + quarter(null), --- negative Unix time + quarter('1966-01-01 00:00:00'), quarter('1966-03-31 23:59:59.999'), quarter('1966-04-01 00:00:00'), @@ -148,8 +144,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1 1 1 2 2 2 3 3 3 4 4 4 2 1 NULL NULL 1 1 2 4 -PREHOOK: query: -- date -select +PREHOOK: query: select quarter(cast('2014-01-10' as date)), quarter(cast('2014-02-10' as date)), quarter(cast('2014-03-31' as date)), @@ -162,9 +157,9 @@ quarter(cast('2016-09-29' as date)), quarter(cast('2016-10-29' as date)), quarter(cast('2016-11-29' as date)), quarter(cast('2016-12-29' as date)), --- null date + quarter(cast(null as date)), --- negative Unix time + quarter(cast('1966-01-01' as date)), quarter(cast('1966-03-31' as date)), quarter(cast('1966-04-01' as 
date)), @@ -172,8 +167,7 @@ quarter(cast('1966-12-31' as date)) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- date -select +POSTHOOK: query: select quarter(cast('2014-01-10' as date)), quarter(cast('2014-02-10' as date)), quarter(cast('2014-03-31' as date)), @@ -186,9 +180,9 @@ quarter(cast('2016-09-29' as date)), quarter(cast('2016-10-29' as date)), quarter(cast('2016-11-29' as date)), quarter(cast('2016-12-29' as date)), --- null date + quarter(cast(null as date)), --- negative Unix time + quarter(cast('1966-01-01' as date)), quarter(cast('1966-03-31' as date)), quarter(cast('1966-04-01' as date)), @@ -197,8 +191,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 1 1 1 2 2 2 3 3 3 4 4 4 NULL 1 1 2 4 -PREHOOK: query: -- timestamp -select +PREHOOK: query: select quarter(cast('2014-01-10 00:00:00' as timestamp)), quarter(cast('2014-02-10 15:23:00' as timestamp)), quarter(cast('2014-03-31 15:23:00' as timestamp)), @@ -211,9 +204,9 @@ quarter(cast('2016-09-29 15:23:00' as timestamp)), quarter(cast('2016-10-29 15:23:00' as timestamp)), quarter(cast('2016-11-29 15:23:00' as timestamp)), quarter(cast('2016-12-29 15:23:00' as timestamp)), --- null timestamp + quarter(cast(null as timestamp)), --- negative Unix time + quarter(cast('1966-01-01 00:00:00' as timestamp)), quarter(cast('1966-03-31 23:59:59.999' as timestamp)), quarter(cast('1966-04-01 00:00:00' as timestamp)), @@ -221,8 +214,7 @@ quarter(cast('1966-12-31 23:59:59.999' as timestamp)) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- timestamp -select +POSTHOOK: query: select quarter(cast('2014-01-10 00:00:00' as timestamp)), quarter(cast('2014-02-10 15:23:00' as timestamp)), quarter(cast('2014-03-31 15:23:00' as timestamp)), @@ -235,9 +227,9 @@ quarter(cast('2016-09-29 15:23:00' as timestamp)), 
quarter(cast('2016-10-29 15:23:00' as timestamp)), quarter(cast('2016-11-29 15:23:00' as timestamp)), quarter(cast('2016-12-29 15:23:00' as timestamp)), --- null timestamp + quarter(cast(null as timestamp)), --- negative Unix time + quarter(cast('1966-01-01 00:00:00' as timestamp)), quarter(cast('1966-03-31 23:59:59.999' as timestamp)), quarter(cast('1966-04-01 00:00:00' as timestamp)), diff --git a/ql/src/test/results/clientpositive/udf_reverse.q.out b/ql/src/test/results/clientpositive/udf_reverse.q.out index 9d0f869..28b0c9f 100644 --- a/ql/src/test/results/clientpositive/udf_reverse.q.out +++ b/ql/src/test/results/clientpositive/udf_reverse.q.out @@ -157,17 +157,11 @@ POSTHOOK: query: DROP TABLE dest1 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@dest1 POSTHOOK: Output: default@dest1 -PREHOOK: query: -- Test with non-ascii characters --- kv4.txt contains the text 0xE982B5E993AE, which should be reversed to --- 0xE993AEE982B5 -CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- Test with non-ascii characters --- kv4.txt contains the text 0xE982B5E993AE, which should be reversed to --- 0xE993AEE982B5 -CREATE TABLE dest1(name STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE dest1(name STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/udf_round_2.q.out b/ql/src/test/results/clientpositive/udf_round_2.q.out index b231a0f..4dbe8fc 100644 --- a/ql/src/test/results/clientpositive/udf_round_2.q.out +++ b/ql/src/test/results/clientpositive/udf_round_2.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- test for NaN (not-a-number) -create table tstTbl1(n double) +PREHOOK: query: create table tstTbl1(n double) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default 
PREHOOK: Output: default@tstTbl1 -POSTHOOK: query: -- test for NaN (not-a-number) -create table tstTbl1(n double) +POSTHOOK: query: create table tstTbl1(n double) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tstTbl1 @@ -46,13 +44,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tsttbl1 #### A masked pattern was here #### NaN -PREHOOK: query: -- test for Infinity -select round(1/0), round(1/0, 2), round(1.0/0.0), round(1.0/0.0, 2) from src tablesample (1 rows) +PREHOOK: query: select round(1/0), round(1/0, 2), round(1.0/0.0), round(1.0/0.0, 2) from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test for Infinity -select round(1/0), round(1/0, 2), round(1.0/0.0), round(1.0/0.0, 2) from src tablesample (1 rows) +POSTHOOK: query: select round(1/0), round(1/0, 2), round(1.0/0.0), round(1.0/0.0, 2) from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_round_3.q.out b/ql/src/test/results/clientpositive/udf_round_3.q.out index 12eee56..13d3891 100644 --- a/ql/src/test/results/clientpositive/udf_round_3.q.out +++ b/ql/src/test/results/clientpositive/udf_round_3.q.out @@ -1,54 +1,44 @@ -PREHOOK: query: -- test for TINYINT -select round(-128), round(127), round(0) from src tablesample (1 rows) +PREHOOK: query: select round(-128), round(127), round(0) from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test for TINYINT -select round(-128), round(127), round(0) from src tablesample (1 rows) +POSTHOOK: query: select round(-128), round(127), round(0) from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -128 127 0 -PREHOOK: query: -- test for SMALLINT -select round(-32768), round(32767), 
round(-129), round(128) from src tablesample (1 rows) +PREHOOK: query: select round(-32768), round(32767), round(-129), round(128) from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test for SMALLINT -select round(-32768), round(32767), round(-129), round(128) from src tablesample (1 rows) +POSTHOOK: query: select round(-32768), round(32767), round(-129), round(128) from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -32768 32767 -129 128 -PREHOOK: query: -- test for INT -select round(cast(negative(pow(2, 31)) as INT)), round(cast((pow(2, 31) - 1) as INT)), round(-32769), round(32768) from src tablesample (1 rows) +PREHOOK: query: select round(cast(negative(pow(2, 31)) as INT)), round(cast((pow(2, 31) - 1) as INT)), round(-32769), round(32768) from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test for INT -select round(cast(negative(pow(2, 31)) as INT)), round(cast((pow(2, 31) - 1) as INT)), round(-32769), round(32768) from src tablesample (1 rows) +POSTHOOK: query: select round(cast(negative(pow(2, 31)) as INT)), round(cast((pow(2, 31) - 1) as INT)), round(-32769), round(32768) from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -2147483648 2147483647 -32769 32768 -PREHOOK: query: -- test for BIGINT -select round(cast(negative(pow(2, 63)) as BIGINT)), round(cast((pow(2, 63) - 1) as BIGINT)), round(cast(negative(pow(2, 31) + 1) as BIGINT)), round(cast(pow(2, 31) as BIGINT)) from src tablesample (1 rows) +PREHOOK: query: select round(cast(negative(pow(2, 63)) as BIGINT)), round(cast((pow(2, 63) - 1) as BIGINT)), round(cast(negative(pow(2, 31) + 1) as BIGINT)), round(cast(pow(2, 31) as BIGINT)) from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src 
#### A masked pattern was here #### -POSTHOOK: query: -- test for BIGINT -select round(cast(negative(pow(2, 63)) as BIGINT)), round(cast((pow(2, 63) - 1) as BIGINT)), round(cast(negative(pow(2, 31) + 1) as BIGINT)), round(cast(pow(2, 31) as BIGINT)) from src tablesample (1 rows) +POSTHOOK: query: select round(cast(negative(pow(2, 63)) as BIGINT)), round(cast((pow(2, 63) - 1) as BIGINT)), round(cast(negative(pow(2, 31) + 1) as BIGINT)), round(cast(pow(2, 31) as BIGINT)) from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -9223372036854775808 9223372036854775807 -2147483649 2147483648 -PREHOOK: query: -- test for DOUBLE -select round(126.1), round(126.7), round(32766.1), round(32766.7) from src tablesample (1 rows) +PREHOOK: query: select round(126.1), round(126.7), round(32766.1), round(32766.7) from src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test for DOUBLE -select round(126.1), round(126.7), round(32766.1), round(32766.7) from src tablesample (1 rows) +POSTHOOK: query: select round(126.1), round(126.7), round(32766.1), round(32766.7) from src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_sentences.q.out b/ql/src/test/results/clientpositive/udf_sentences.q.out index 14a0098..981ee6d 100644 --- a/ql/src/test/results/clientpositive/udf_sentences.q.out +++ b/ql/src/test/results/clientpositive/udf_sentences.q.out @@ -187,11 +187,11 @@ POSTHOOK: Input: default@sent_tmp2 7669656C7365697469676572 766F6E C39C6265727365747A756E67 -PREHOOK: query: SELECT sentences("Hive is an excellent tool for data querying; and perhaps more versatile than machine translation!! 
Multiple, ill-formed sentences...confounding punctuation--and yet this UDF still works!!!!") AS value FROM src ORDER BY value ASC LIMIT 1 +PREHOOK: query: SELECT sentences("Hive is an excellent tool for data querying\; and perhaps more versatile than machine translation!! Multiple, ill-formed sentences...confounding punctuation--and yet this UDF still works!!!!") AS value FROM src ORDER BY value ASC LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: SELECT sentences("Hive is an excellent tool for data querying; and perhaps more versatile than machine translation!! Multiple, ill-formed sentences...confounding punctuation--and yet this UDF still works!!!!") AS value FROM src ORDER BY value ASC LIMIT 1 +POSTHOOK: query: SELECT sentences("Hive is an excellent tool for data querying\; and perhaps more versatile than machine translation!! Multiple, ill-formed sentences...confounding punctuation--and yet this UDF still works!!!!") AS value FROM src ORDER BY value ASC LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_sha2.q.out b/ql/src/test/results/clientpositive/udf_sha2.q.out index 6553339..5e5c35f 100644 --- a/ql/src/test/results/clientpositive/udf_sha2.q.out +++ b/ql/src/test/results/clientpositive/udf_sha2.q.out @@ -119,15 +119,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 397118fdac8d83ad98813c50759c85b8c47565d8268bf10da483153b747a74743a58a90e85aa9f705ce6984ffc128db567489817e4092d050d8a1cc596ddc119 cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e 397118fdac8d83ad98813c50759c85b8c47565d8268bf10da483153b747a74743a58a90e85aa9f705ce6984ffc128db567489817e4092d050d8a1cc596ddc119 
cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e NULL NULL -PREHOOK: query: --null -select +PREHOOK: query: select sha2('ABC', 200), sha2('ABC', cast(null as int)) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --null -select +POSTHOOK: query: select sha2('ABC', 200), sha2('ABC', cast(null as int)) POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf_sort_array.q.out b/ql/src/test/results/clientpositive/udf_sort_array.q.out index 31affff..1e9dc85 100644 --- a/ql/src/test/results/clientpositive/udf_sort_array.q.out +++ b/ql/src/test/results/clientpositive/udf_sort_array.q.out @@ -4,13 +4,9 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Test sort_array() UDF - -DESCRIBE FUNCTION sort_array +PREHOOK: query: DESCRIBE FUNCTION sort_array PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- Test sort_array() UDF - -DESCRIBE FUNCTION sort_array +POSTHOOK: query: DESCRIBE FUNCTION sort_array POSTHOOK: type: DESCFUNCTION sort_array(array(obj1, obj2,...)) - Sorts the input array in ascending order according to the natural ordering of the array elements. 
PREHOOK: query: DESCRIBE FUNCTION EXTENDED sort_array @@ -23,12 +19,10 @@ Example: 'a', 'b', 'c', 'd' Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFSortArray Function type:BUILTIN -PREHOOK: query: -- Evaluate function against STRING valued keys -EXPLAIN +PREHOOK: query: EXPLAIN SELECT sort_array(array("b", "d", "c", "a")) FROM src tablesample (1 rows) PREHOOK: type: QUERY -POSTHOOK: query: -- Evaluate function against STRING valued keys -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT sort_array(array("b", "d", "c", "a")) FROM src tablesample (1 rows) POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -67,63 +61,52 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### ["enterprise databases","hadoop distributed file system","hadoop map-reduce"] -PREHOOK: query: -- Evaluate function against INT valued keys -SELECT sort_array(array(2, 9, 7, 3, 5, 4, 1, 6, 8)) FROM src tablesample (1 rows) +PREHOOK: query: SELECT sort_array(array(2, 9, 7, 3, 5, 4, 1, 6, 8)) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against INT valued keys -SELECT sort_array(array(2, 9, 7, 3, 5, 4, 1, 6, 8)) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT sort_array(array(2, 9, 7, 3, 5, 4, 1, 6, 8)) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### [1,2,3,4,5,6,7,8,9] -PREHOOK: query: -- Evaluate function against FLOAT valued keys -SELECT sort_array(sort_array(array(2.333, 9, 1.325, 2.003, 0.777, -3.445, 1))) FROM src tablesample (1 rows) +PREHOOK: query: SELECT sort_array(sort_array(array(2.333, 9, 1.325, 2.003, 0.777, -3.445, 1))) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against FLOAT valued keys -SELECT sort_array(sort_array(array(2.333, 9, 1.325, 2.003, 0.777, 
-3.445, 1))) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT sort_array(sort_array(array(2.333, 9, 1.325, 2.003, 0.777, -3.445, 1))) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### [-3.445,0.777,1,1.325,2.003,2.333,9] -PREHOOK: query: -- Evaluate function against LIST valued keys -SELECT sort_array(array(array(2, 9, 7), array(3, 5, 4), array(1, 6, 8))) FROM src tablesample (1 rows) +PREHOOK: query: SELECT sort_array(array(array(2, 9, 7), array(3, 5, 4), array(1, 6, 8))) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against LIST valued keys -SELECT sort_array(array(array(2, 9, 7), array(3, 5, 4), array(1, 6, 8))) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT sort_array(array(array(2, 9, 7), array(3, 5, 4), array(1, 6, 8))) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### [[1,6,8],[2,9,7],[3,5,4]] -PREHOOK: query: -- Evaluate function against STRUCT valued keys -SELECT sort_array(array(struct(2, 9, 7), struct(3, 5, 4), struct(1, 6, 8))) FROM src tablesample (1 rows) +PREHOOK: query: SELECT sort_array(array(struct(2, 9, 7), struct(3, 5, 4), struct(1, 6, 8))) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against STRUCT valued keys -SELECT sort_array(array(struct(2, 9, 7), struct(3, 5, 4), struct(1, 6, 8))) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT sort_array(array(struct(2, 9, 7), struct(3, 5, 4), struct(1, 6, 8))) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### [{"col1":1,"col2":6,"col3":8},{"col1":2,"col2":9,"col3":7},{"col1":3,"col2":5,"col3":4}] -PREHOOK: query: -- Evaluate function against MAP valued keys -SELECT 
sort_array(array(map("b", 2, "a", 9, "c", 7), map("c", 3, "b", 5, "a", 1), map("a", 1, "c", 6, "b", 8))) FROM src tablesample (1 rows) +PREHOOK: query: SELECT sort_array(array(map("b", 2, "a", 9, "c", 7), map("c", 3, "b", 5, "a", 1), map("a", 1, "c", 6, "b", 8))) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Evaluate function against MAP valued keys -SELECT sort_array(array(map("b", 2, "a", 9, "c", 7), map("c", 3, "b", 5, "a", 1), map("a", 1, "c", 6, "b", 8))) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT sort_array(array(map("b", 2, "a", 9, "c", 7), map("c", 3, "b", 5, "a", 1), map("a", 1, "c", 6, "b", 8))) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -[{"b":5,"a":1,"c":3},{"b":8,"a":1,"c":6},{"b":2,"a":9,"c":7}] -PREHOOK: query: -- Test it against data in a table. -CREATE TABLE dest1 ( +[{"a":1,"b":5,"c":3},{"a":1,"b":8,"c":6},{"a":9,"b":2,"c":7}] +PREHOOK: query: CREATE TABLE dest1 ( tinyints ARRAY, smallints ARRAY, ints ARRAY, @@ -137,8 +120,7 @@ CREATE TABLE dest1 ( PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- Test it against data in a table. 
-CREATE TABLE dest1 ( +POSTHOOK: query: CREATE TABLE dest1 ( tinyints ARRAY, smallints ARRAY, ints ARRAY, diff --git a/ql/src/test/results/clientpositive/udf_sort_array_by.q.out b/ql/src/test/results/clientpositive/udf_sort_array_by.q.out index a2b14ff..edd61b8 100644 --- a/ql/src/test/results/clientpositive/udf_sort_array_by.q.out +++ b/ql/src/test/results/clientpositive/udf_sort_array_by.q.out @@ -4,13 +4,9 @@ PREHOOK: Input: database:default POSTHOOK: query: use default POSTHOOK: type: SWITCHDATABASE POSTHOOK: Input: database:default -PREHOOK: query: -- Test sort_array_by() UDF - -DESCRIBE FUNCTION sort_array_by +PREHOOK: query: DESCRIBE FUNCTION sort_array_by PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- Test sort_array_by() UDF - -DESCRIBE FUNCTION sort_array_by +POSTHOOK: query: DESCRIBE FUNCTION sort_array_by POSTHOOK: type: DESCFUNCTION sort_array_by(array(obj1, obj2,...),'f1','f2',...,['ASC','DESC']) - Sorts the input tuple array in user specified order(ASC,DESC) by desired field[s] name If sorting order is not mentioned by user then dafault sorting order is ascending PREHOOK: query: DESCRIBE FUNCTION EXTENDED sort_array_by @@ -108,47 +104,40 @@ POSTHOOK: Output: default@sort_array_by_table POSTHOOK: Lineage: sort_array_by_table.company EXPRESSION [] POSTHOOK: Lineage: sort_array_by_table.country EXPRESSION [] POSTHOOK: Lineage: sort_array_by_table.employee EXPRESSION [] -PREHOOK: query: --Sort tuple array by field name:salary(single column) by default ascending order -select company,country,sort_array_by(employee,'salary') as single_field_sort from sort_array_by_table +PREHOOK: query: select company,country,sort_array_by(employee,'salary') as single_field_sort from sort_array_by_table PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### -POSTHOOK: query: --Sort tuple array by field name:salary(single column) by default ascending order -select company,country,sort_array_by(employee,'salary') as 
single_field_sort from sort_array_by_table +POSTHOOK: query: select company,country,sort_array_by(employee,'salary') as single_field_sort from sort_array_by_table POSTHOOK: type: QUERY POSTHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### Google IN [{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":900,"name":"Hary","age":21,"salary":50000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":800,"name":"Able","age":28,"salary":80000}] Facebook US [{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":206,"name":"Keiko","age":41,"salary":80500}] Microsoft UK [{"empid":730,"name":"Eden","age":45,"salary":20300},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":300,"name":"Spiro","age":38,"salary":80300}] -PREHOOK: query: --Sort tuple array by field name:salary(single column) by ascending order -select company,country,sort_array_by(employee,'salary','ASC') as single_field_sort from sort_array_by_table +PREHOOK: query: select company,country,sort_array_by(employee,'salary','ASC') as single_field_sort from sort_array_by_table PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### -POSTHOOK: query: --Sort tuple array by field name:salary(single column) by ascending order -select company,country,sort_array_by(employee,'salary','ASC') as single_field_sort from sort_array_by_table +POSTHOOK: query: select company,country,sort_array_by(employee,'salary','ASC') as single_field_sort from sort_array_by_table 
POSTHOOK: type: QUERY POSTHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### Google IN [{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":900,"name":"Hary","age":21,"salary":50000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":800,"name":"Able","age":28,"salary":80000}] Facebook US [{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":206,"name":"Keiko","age":41,"salary":80500}] Microsoft UK [{"empid":730,"name":"Eden","age":45,"salary":20300},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":300,"name":"Spiro","age":38,"salary":80300}] -PREHOOK: query: --Sort tuple array by field name:salary(single column) by descending order -select company,country,sort_array_by(employee,'salary','desc') as single_field_sort from sort_array_by_table +PREHOOK: query: select company,country,sort_array_by(employee,'salary','desc') as single_field_sort from sort_array_by_table PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### -POSTHOOK: query: --Sort tuple array by field name:salary(single column) by descending order -select company,country,sort_array_by(employee,'salary','desc') as single_field_sort from sort_array_by_table +POSTHOOK: query: select company,country,sort_array_by(employee,'salary','desc') as single_field_sort from sort_array_by_table POSTHOOK: type: QUERY POSTHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### Google IN 
[{"empid":800,"name":"Able","age":28,"salary":80000},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":900,"name":"Hary","age":21,"salary":50000},{"empid":76,"name":"Hary","age":87,"salary":10000}] Facebook US [{"empid":206,"name":"Keiko","age":41,"salary":80500},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":310,"name":"Ben","age":31,"salary":21000}] Microsoft UK [{"empid":300,"name":"Spiro","age":38,"salary":80300},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":730,"name":"Eden","age":45,"salary":20300}] -PREHOOK: query: --Above three in one query -select company,country, +PREHOOK: query: select company,country, sort_array_by(employee,'salary') as single_field_sort, sort_array_by(employee,'salary','ASC') as single_field_sort_asc, sort_array_by(employee,'salary','DESC') as single_field_sort_desc @@ -156,8 +145,7 @@ from sort_array_by_table PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### -POSTHOOK: query: --Above three in one query -select company,country, +POSTHOOK: query: select company,country, sort_array_by(employee,'salary') as single_field_sort, sort_array_by(employee,'salary','ASC') as single_field_sort_asc, sort_array_by(employee,'salary','DESC') as single_field_sort_desc @@ -168,47 +156,40 @@ POSTHOOK: Input: default@sort_array_by_table Google IN 
[{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":900,"name":"Hary","age":21,"salary":50000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":800,"name":"Able","age":28,"salary":80000}] [{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":900,"name":"Hary","age":21,"salary":50000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":800,"name":"Able","age":28,"salary":80000}] [{"empid":800,"name":"Able","age":28,"salary":80000},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":900,"name":"Hary","age":21,"salary":50000},{"empid":76,"name":"Hary","age":87,"salary":10000}] Facebook US [{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":206,"name":"Keiko","age":41,"salary":80500}] [{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":206,"name":"Keiko","age":41,"salary":80500}] [{"empid":206,"name":"Keiko","age":41,"salary":80500},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":310,"name":"Ben","age":31,"salary":21000}] Microsoft UK 
[{"empid":730,"name":"Eden","age":45,"salary":20300},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":300,"name":"Spiro","age":38,"salary":80300}] [{"empid":730,"name":"Eden","age":45,"salary":20300},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":300,"name":"Spiro","age":38,"salary":80300}] [{"empid":300,"name":"Spiro","age":38,"salary":80300},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":730,"name":"Eden","age":45,"salary":20300}] -PREHOOK: query: --Sort tuple array by field names : name,salary(multiple columns) by default ascending order -select company,country,sort_array_by(employee,'name','salary') as multiple_field_sort from sort_array_by_table +PREHOOK: query: select company,country,sort_array_by(employee,'name','salary') as multiple_field_sort from sort_array_by_table PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### -POSTHOOK: query: --Sort tuple array by field names : name,salary(multiple columns) by default ascending order -select company,country,sort_array_by(employee,'name','salary') as multiple_field_sort from sort_array_by_table +POSTHOOK: query: select company,country,sort_array_by(employee,'name','salary') as multiple_field_sort from sort_array_by_table POSTHOOK: type: QUERY POSTHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### Google IN 
[{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":800,"name":"Able","age":28,"salary":80000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":900,"name":"Hary","age":21,"salary":50000}] Facebook US [{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":206,"name":"Keiko","age":41,"salary":80500}] Microsoft UK [{"empid":730,"name":"Eden","age":45,"salary":20300},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":300,"name":"Spiro","age":38,"salary":80300}] -PREHOOK: query: --Sort tuple array by field names : name,salary(multiple columns) ascending order -select company,country,sort_array_by(employee,'name','salary','asc') as multiple_field_sort from sort_array_by_table +PREHOOK: query: select company,country,sort_array_by(employee,'name','salary','asc') as multiple_field_sort from sort_array_by_table PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### -POSTHOOK: query: --Sort tuple array by field names : name,salary(multiple columns) ascending order -select company,country,sort_array_by(employee,'name','salary','asc') as multiple_field_sort from sort_array_by_table +POSTHOOK: query: select company,country,sort_array_by(employee,'name','salary','asc') as multiple_field_sort from sort_array_by_table POSTHOOK: type: QUERY POSTHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### Google IN 
[{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":800,"name":"Able","age":28,"salary":80000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":900,"name":"Hary","age":21,"salary":50000}] Facebook US [{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":206,"name":"Keiko","age":41,"salary":80500}] Microsoft UK [{"empid":730,"name":"Eden","age":45,"salary":20300},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":300,"name":"Spiro","age":38,"salary":80300}] -PREHOOK: query: --Sort tuple array by field names : name,salary(multiple columns) descending order -select company,country,sort_array_by(employee,'name',"salary","DESC") as multiple_field_sort from sort_array_by_table +PREHOOK: query: select company,country,sort_array_by(employee,'name',"salary","DESC") as multiple_field_sort from sort_array_by_table PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### -POSTHOOK: query: --Sort tuple array by field names : name,salary(multiple columns) descending order -select company,country,sort_array_by(employee,'name',"salary","DESC") as multiple_field_sort from sort_array_by_table +POSTHOOK: query: select company,country,sort_array_by(employee,'name',"salary","DESC") as multiple_field_sort from sort_array_by_table POSTHOOK: type: QUERY POSTHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### Google IN 
[{"empid":900,"name":"Hary","age":21,"salary":50000},{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":800,"name":"Able","age":28,"salary":80000},{"empid":756,"name":"Able","age":23,"salary":76889}] Facebook US [{"empid":206,"name":"Keiko","age":41,"salary":80500},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":700,"name":"Aron","age":21,"salary":50000}] Microsoft UK [{"empid":300,"name":"Spiro","age":38,"salary":80300},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":730,"name":"Eden","age":45,"salary":20300}] -PREHOOK: query: --Above three in one query -select company,country, +PREHOOK: query: select company,country, sort_array_by(employee,'name','salary') as multiple_field_sort, sort_array_by(employee,'name','salary','ASC') as multiple_field_sort_asc, sort_array_by(employee,'name',"salary","DESC") as multiple_field_sort_desc @@ -216,8 +197,7 @@ from sort_array_by_table PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_table #### A masked pattern was here #### -POSTHOOK: query: --Above three in one query -select company,country, +POSTHOOK: query: select company,country, sort_array_by(employee,'name','salary') as multiple_field_sort, sort_array_by(employee,'name','salary','ASC') as multiple_field_sort_asc, sort_array_by(employee,'name',"salary","DESC") as multiple_field_sort_desc @@ -228,11 +208,9 @@ POSTHOOK: Input: default@sort_array_by_table Google IN 
[{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":800,"name":"Able","age":28,"salary":80000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":900,"name":"Hary","age":21,"salary":50000}] [{"empid":756,"name":"Able","age":23,"salary":76889},{"empid":800,"name":"Able","age":28,"salary":80000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":900,"name":"Hary","age":21,"salary":50000}] [{"empid":900,"name":"Hary","age":21,"salary":50000},{"empid":76,"name":"Hary","age":87,"salary":10000},{"empid":130,"name":"Boo","age":22,"salary":79000},{"empid":100,"name":"Boo","age":21,"salary":70000},{"empid":800,"name":"Able","age":28,"salary":80000},{"empid":756,"name":"Able","age":23,"salary":76889}] Facebook US [{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":206,"name":"Keiko","age":41,"salary":80500}] [{"empid":700,"name":"Aron","age":21,"salary":50000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":206,"name":"Keiko","age":41,"salary":80500}] [{"empid":206,"name":"Keiko","age":41,"salary":80500},{"empid":200,"name":"Keiko","age":28,"salary":80000},{"empid":390,"name":"Ben","age":21,"salary":70000},{"empid":310,"name":"Ben","age":31,"salary":21000},{"empid":320,"name":"Aron","age":18,"salary":70000},{"empid":700,"name":"Aron","age":21,"salary":50000}] Microsoft UK 
[{"empid":730,"name":"Eden","age":45,"salary":20300},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":300,"name":"Spiro","age":38,"salary":80300}] [{"empid":730,"name":"Eden","age":45,"salary":20300},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":300,"name":"Spiro","age":38,"salary":80300}] [{"empid":300,"name":"Spiro","age":38,"salary":80300},{"empid":900,"name":"Spiro","age":28,"salary":80000},{"empid":600,"name":"James","age":21,"salary":70000},{"empid":313,"name":"James","age":11,"salary":30000},{"empid":260,"name":"Eden","age":31,"salary":50020},{"empid":730,"name":"Eden","age":45,"salary":20300}] -PREHOOK: query: -- Test for order name ('ASC' and 'DESC') as tuple field names and and order name -DROP TABLE IF EXISTS sort_array_by_order_name +PREHOOK: query: DROP TABLE IF EXISTS sort_array_by_order_name PREHOOK: type: DROPTABLE -POSTHOOK: query: -- Test for order name ('ASC' and 'DESC') as tuple field names and and order name -DROP TABLE IF EXISTS sort_array_by_order_name +POSTHOOK: query: DROP TABLE IF EXISTS sort_array_by_order_name POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE sort_array_by_order_name STORED AS TEXTFILE @@ -262,8 +240,7 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@sort_array_by_order_name POSTHOOK: Lineage: sort_array_by_order_name.company SIMPLE [] POSTHOOK: Lineage: sort_array_by_order_name.employee EXPRESSION [] -PREHOOK: query: -- select asc,desc as filed name with default sorting -select +PREHOOK: query: select company, sort_array_by(employee,'asc') as col1, sort_array_by(employee,'DESC') as col2 @@ -271,8 +248,7 @@ from sort_array_by_order_name PREHOOK: type: QUERY 
PREHOOK: Input: default@sort_array_by_order_name #### A masked pattern was here #### -POSTHOOK: query: -- select asc,desc as filed name with default sorting -select +POSTHOOK: query: select company, sort_array_by(employee,'asc') as col1, sort_array_by(employee,'DESC') as col2 @@ -281,10 +257,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@sort_array_by_order_name #### A masked pattern was here #### Google [{"asc":"Able","desc":"Keiko","salary":28},{"asc":"Boo","desc":"Aron","salary":70000},{"asc":"Hary","desc":"James","salary":50000}] [{"asc":"Boo","desc":"Aron","salary":70000},{"asc":"Hary","desc":"James","salary":50000},{"asc":"Able","desc":"Keiko","salary":28}] -PREHOOK: query: --select asc,desc as field name and explicitly provided sorting ordering. ---If argument length's size are more than two (first: tuple list,second: desired minimum a field name) ---then we always check whether the last argument is any sorting order name(ASC or DESC) -select +PREHOOK: query: select company, sort_array_by(employee,'asc','ASC') as col1, sort_array_by(employee,'DESC','desc') as col2 @@ -293,10 +266,7 @@ sort_array_by_order_name PREHOOK: type: QUERY PREHOOK: Input: default@sort_array_by_order_name #### A masked pattern was here #### -POSTHOOK: query: --select asc,desc as field name and explicitly provided sorting ordering. 
---If argument length's size are more than two (first: tuple list,second: desired minimum a field name) ---then we always check whether the last argument is any sorting order name(ASC or DESC) -select +POSTHOOK: query: select company, sort_array_by(employee,'asc','ASC') as col1, sort_array_by(employee,'DESC','desc') as col2 @@ -306,13 +276,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@sort_array_by_order_name #### A masked pattern was here #### Google [{"asc":"Able","desc":"Keiko","salary":28},{"asc":"Boo","desc":"Aron","salary":70000},{"asc":"Hary","desc":"James","salary":50000}] [{"asc":"Able","desc":"Keiko","salary":28},{"asc":"Hary","desc":"James","salary":50000},{"asc":"Boo","desc":"Aron","salary":70000}] -PREHOOK: query: -- similarity of sorting order check between this UDF and LATERAL VIEW explode(array). - -DROP TABLE IF EXISTS sort_array_by_table_order +PREHOOK: query: DROP TABLE IF EXISTS sort_array_by_table_order PREHOOK: type: DROPTABLE -POSTHOOK: query: -- similarity of sorting order check between this UDF and LATERAL VIEW explode(array). 
- -DROP TABLE IF EXISTS sort_array_by_table_order +POSTHOOK: query: DROP TABLE IF EXISTS sort_array_by_table_order POSTHOOK: type: DROPTABLE PREHOOK: query: CREATE TABLE sort_array_by_table_order STORED AS TEXTFILE diff --git a/ql/src/test/results/clientpositive/udf_substr.q.out b/ql/src/test/results/clientpositive/udf_substr.q.out index 10bb66c..4489006 100644 --- a/ql/src/test/results/clientpositive/udf_substr.q.out +++ b/ql/src/test/results/clientpositive/udf_substr.q.out @@ -120,8 +120,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### C C C C B BC BC BC A AB ABC ABC -PREHOOK: query: -- substring() is a synonim of substr(), so just perform some basic tests -SELECT +PREHOOK: query: SELECT substring('ABCDEFG', 3, 4), substring('ABCDEFG', -5, 4), substring('ABCDEFG', 3), substring('ABCDEFG', -5), substring('ABC', 0), substring('ABC', 1), substring('ABC', 2), substring('ABC', 3), @@ -131,8 +130,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- substring() is a synonim of substr(), so just perform some basic tests -SELECT +POSTHOOK: query: SELECT substring('ABCDEFG', 3, 4), substring('ABCDEFG', -5, 4), substring('ABCDEFG', 3), substring('ABCDEFG', -5), substring('ABC', 0), substring('ABC', 1), substring('ABC', 2), substring('ABC', 3), @@ -143,8 +141,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### CDEF CDEF CDEFG CDEFG ABC ABC BC C ABC BC A A A -PREHOOK: query: -- test for binary substr -SELECT +PREHOOK: query: SELECT substr(null, 1), substr(null, 1, 1), substr(ABC, null), substr(ABC, null, 1), substr(ABC, 1, null), @@ -163,8 +160,7 @@ FROM ( PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test for binary substr -SELECT +POSTHOOK: query: SELECT substr(null, 1), substr(null, 1, 1), substr(ABC, null), substr(ABC, null, 1), substr(ABC, 1, 
null), @@ -184,8 +180,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### NULL NULL NULL NULL NULL A AB ABC ABC A AB ABC ABC B BC BC BC C C C C C C C C B BC BC BC A AB ABC ABC -PREHOOK: query: -- test UTF-8 substr -SELECT +PREHOOK: query: SELECT substr("玩", 1), substr("abc 玩", 5), substr("abc 玩玩玩 abc", 5), @@ -194,8 +189,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- test UTF-8 substr -SELECT +POSTHOOK: query: SELECT substr("玩", 1), substr("abc 玩", 5), substr("abc 玩玩玩 abc", 5), diff --git a/ql/src/test/results/clientpositive/udf_substring.q.out b/ql/src/test/results/clientpositive/udf_substring.q.out index cd7dce6..72898e6 100644 --- a/ql/src/test/results/clientpositive/udf_substring.q.out +++ b/ql/src/test/results/clientpositive/udf_substring.q.out @@ -1,8 +1,6 @@ -PREHOOK: query: -- Synonym. See udf_substr.q -DESCRIBE FUNCTION substring +PREHOOK: query: DESCRIBE FUNCTION substring PREHOOK: type: DESCFUNCTION -POSTHOOK: query: -- Synonym. 
See udf_substr.q -DESCRIBE FUNCTION substring +POSTHOOK: query: DESCRIBE FUNCTION substring POSTHOOK: type: DESCFUNCTION substring(str, pos[, len]) - returns the substring of str that starts at pos and is of length len orsubstring(bin, pos[, len]) - returns the slice of byte array that starts at pos and is of length len PREHOOK: query: DESCRIBE FUNCTION EXTENDED substring diff --git a/ql/src/test/results/clientpositive/udf_substring_index.q.out b/ql/src/test/results/clientpositive/udf_substring_index.q.out index a519d8e..b7347e6 100644 --- a/ql/src/test/results/clientpositive/udf_substring_index.q.out +++ b/ql/src/test/results/clientpositive/udf_substring_index.q.out @@ -60,15 +60,15 @@ POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### www.apache.org www.apache www org apache.org www.apache.org PREHOOK: query: select ---str is empty string + substring_index('', '.', 2), ---delim is empty string + substring_index('www.apache.org', '', 1), ---delim does not exist in str + substring_index('www.apache.org', '-', 2), ---delim is two chars + substring_index('www||apache||org', '||', 2), ---null + substring_index(cast(null as string), '.', 2), substring_index('www.apache.org', cast(null as string), 2), substring_index('www.apache.org', '.', cast(null as int)) @@ -76,15 +76,15 @@ PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### POSTHOOK: query: select ---str is empty string + substring_index('', '.', 2), ---delim is empty string + substring_index('www.apache.org', '', 1), ---delim does not exist in str + substring_index('www.apache.org', '-', 2), ---delim is two chars + substring_index('www||apache||org', '||', 2), ---null + substring_index(cast(null as string), '.', 2), substring_index('www.apache.org', cast(null as string), 2), substring_index('www.apache.org', '.', cast(null as int)) @@ -92,15 +92,13 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked 
pattern was here #### www.apache.org www||apache NULL NULL NULL -PREHOOK: query: --varchar and char -select +PREHOOK: query: select substring_index(cast('www.apache.org' as varchar(20)), '.', 2), substring_index(cast('www.apache.org' as char(20)), '.', 2) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: --varchar and char -select +POSTHOOK: query: select substring_index(cast('www.apache.org' as varchar(20)), '.', 2), substring_index(cast('www.apache.org' as char(20)), '.', 2) POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf_to_boolean.q.out b/ql/src/test/results/clientpositive/udf_to_boolean.q.out index d50d8c2..ebce364 100644 --- a/ql/src/test/results/clientpositive/udf_to_boolean.q.out +++ b/ql/src/test/results/clientpositive/udf_to_boolean.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- 'true' cases: - -SELECT CAST(CAST(1 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(CAST(1 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- 'true' cases: - -SELECT CAST(CAST(1 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(CAST(1 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -83,15 +79,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true -PREHOOK: query: -- 'false' cases: - -SELECT CAST(CAST(0 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(CAST(0 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- 'false' cases: - -SELECT CAST(CAST(0 AS TINYINT) AS BOOLEAN) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(CAST(0 AS 
TINYINT) AS BOOLEAN) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -168,13 +160,11 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### false -PREHOOK: query: -- 'NULL' cases: -SELECT CAST(NULL AS BOOLEAN) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(NULL AS BOOLEAN) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- 'NULL' cases: -SELECT CAST(NULL AS BOOLEAN) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(NULL AS BOOLEAN) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_to_byte.q.out b/ql/src/test/results/clientpositive/udf_to_byte.q.out index 642584b..078c476 100644 --- a/ql/src/test/results/clientpositive/udf_to_byte.q.out +++ b/ql/src/test/results/clientpositive/udf_to_byte.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Conversion of main primitive types to Byte type: -SELECT CAST(NULL AS TINYINT) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(NULL AS TINYINT) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Conversion of main primitive types to Byte type: -SELECT CAST(NULL AS TINYINT) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(NULL AS TINYINT) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_to_double.q.out b/ql/src/test/results/clientpositive/udf_to_double.q.out index 7efdcd5..779da5e 100644 --- a/ql/src/test/results/clientpositive/udf_to_double.q.out +++ b/ql/src/test/results/clientpositive/udf_to_double.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Conversion of main primitive types to Double 
type: -SELECT CAST(NULL AS DOUBLE) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(NULL AS DOUBLE) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Conversion of main primitive types to Double type: -SELECT CAST(NULL AS DOUBLE) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(NULL AS DOUBLE) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_to_float.q.out b/ql/src/test/results/clientpositive/udf_to_float.q.out index 9868796..e1b10a2 100644 --- a/ql/src/test/results/clientpositive/udf_to_float.q.out +++ b/ql/src/test/results/clientpositive/udf_to_float.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Conversion of main primitive types to Float type: -SELECT CAST(NULL AS FLOAT) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(NULL AS FLOAT) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Conversion of main primitive types to Float type: -SELECT CAST(NULL AS FLOAT) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(NULL AS FLOAT) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_to_long.q.out b/ql/src/test/results/clientpositive/udf_to_long.q.out index 015b6a5..fe7c8b7 100644 --- a/ql/src/test/results/clientpositive/udf_to_long.q.out +++ b/ql/src/test/results/clientpositive/udf_to_long.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Conversion of main primitive types to Long type: -SELECT CAST(NULL AS BIGINT) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(NULL AS BIGINT) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Conversion of 
main primitive types to Long type: -SELECT CAST(NULL AS BIGINT) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(NULL AS BIGINT) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_to_short.q.out b/ql/src/test/results/clientpositive/udf_to_short.q.out index 06eb815..8c8ddb6 100644 --- a/ql/src/test/results/clientpositive/udf_to_short.q.out +++ b/ql/src/test/results/clientpositive/udf_to_short.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Conversion of main primitive types to Short type: -SELECT CAST(NULL AS SMALLINT) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(NULL AS SMALLINT) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Conversion of main primitive types to Short type: -SELECT CAST(NULL AS SMALLINT) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(NULL AS SMALLINT) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_to_string.q.out b/ql/src/test/results/clientpositive/udf_to_string.q.out index 9defcd9..bf2f72d 100644 --- a/ql/src/test/results/clientpositive/udf_to_string.q.out +++ b/ql/src/test/results/clientpositive/udf_to_string.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Conversion of main primitive types to String type: -SELECT CAST(NULL AS STRING) FROM src tablesample (1 rows) +PREHOOK: query: SELECT CAST(NULL AS STRING) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Conversion of main primitive types to String type: -SELECT CAST(NULL AS STRING) FROM src tablesample (1 rows) +POSTHOOK: query: SELECT CAST(NULL AS STRING) FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked 
pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out b/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out index 4fd0f41..778eac4 100644 --- a/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out +++ b/ql/src/test/results/clientpositive/udf_to_unix_timestamp.q.out @@ -87,11 +87,9 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@oneline #### A masked pattern was here #### random_string NULL -PREHOOK: query: -- PPD -explain select * from (select * from src) a where unix_timestamp(a.key) > 10 +PREHOOK: query: explain select * from (select * from src) a where unix_timestamp(a.key) > 10 PREHOOK: type: QUERY -POSTHOOK: query: -- PPD -explain select * from (select * from src) a where unix_timestamp(a.key) > 10 +POSTHOOK: query: explain select * from (select * from src) a where unix_timestamp(a.key) > 10 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage diff --git a/ql/src/test/results/clientpositive/udf_translate.q.out b/ql/src/test/results/clientpositive/udf_translate.q.out index f7c0a18..9004565 100644 --- a/ql/src/test/results/clientpositive/udf_translate.q.out +++ b/ql/src/test/results/clientpositive/udf_translate.q.out @@ -22,13 +22,11 @@ For example, translate('abcdef', 'ada', '192') returns '1bc9ef' replaces 'a' with '1' and 'd' with '9' ignoring the second occurence of 'a' in the from string mapping it to '2' Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFTranslate Function type:BUILTIN -PREHOOK: query: -- Create some tables to serve some input data -CREATE TABLE table_input(input STRING) +PREHOOK: query: CREATE TABLE table_input(input STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@table_input -POSTHOOK: query: -- Create some tables to serve some input data -CREATE TABLE table_input(input STRING) +POSTHOOK: query: CREATE TABLE table_input(input STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default 
POSTHOOK: Output: default@table_input @@ -60,53 +58,45 @@ POSTHOOK: Output: default@table_translate POSTHOOK: Lineage: table_translate.from_string SIMPLE [] POSTHOOK: Lineage: table_translate.input_string SIMPLE [] POSTHOOK: Lineage: table_translate.to_string SIMPLE [] -PREHOOK: query: -- Run some queries on constant input parameters -SELECT translate('abcd', 'ab', '12'), +PREHOOK: query: SELECT translate('abcd', 'ab', '12'), translate('abcd', 'abc', '12') FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Run some queries on constant input parameters -SELECT translate('abcd', 'ab', '12'), +POSTHOOK: query: SELECT translate('abcd', 'ab', '12'), translate('abcd', 'abc', '12') FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 12cd 12d -PREHOOK: query: -- Run some queries where first parameter being a table column while the other two being constants -SELECT translate(table_input.input, 'ab', '12'), +PREHOOK: query: SELECT translate(table_input.input, 'ab', '12'), translate(table_input.input, 'abc', '12') FROM table_input tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@table_input #### A masked pattern was here #### -POSTHOOK: query: -- Run some queries where first parameter being a table column while the other two being constants -SELECT translate(table_input.input, 'ab', '12'), +POSTHOOK: query: SELECT translate(table_input.input, 'ab', '12'), translate(table_input.input, 'abc', '12') FROM table_input tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@table_input #### A masked pattern was here #### 12cd 12d -PREHOOK: query: -- Run some queries where all parameters are coming from table columns -SELECT translate(input_string, from_string, to_string) FROM table_translate tablesample (1 rows) +PREHOOK: query: SELECT translate(input_string, from_string, to_string) FROM table_translate 
tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@table_translate #### A masked pattern was here #### -POSTHOOK: query: -- Run some queries where all parameters are coming from table columns -SELECT translate(input_string, from_string, to_string) FROM table_translate tablesample (1 rows) +POSTHOOK: query: SELECT translate(input_string, from_string, to_string) FROM table_translate tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@table_translate #### A masked pattern was here #### 1bc -PREHOOK: query: -- Run some queries where some parameters are NULL -SELECT translate(NULL, 'ab', '12'), +PREHOOK: query: SELECT translate(NULL, 'ab', '12'), translate('abcd', NULL, '12'), translate('abcd', 'ab', NULL), translate(NULL, NULL, NULL) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Run some queries where some parameters are NULL -SELECT translate(NULL, 'ab', '12'), +POSTHOOK: query: SELECT translate(NULL, 'ab', '12'), translate('abcd', NULL, '12'), translate('abcd', 'ab', NULL), translate(NULL, NULL, NULL) FROM src tablesample (1 rows) @@ -114,50 +104,42 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### NULL NULL NULL NULL -PREHOOK: query: -- Run some queries where the same character appears several times in the from string (2nd argument) of the UDF -SELECT translate('abcd', 'aba', '123'), +PREHOOK: query: SELECT translate('abcd', 'aba', '123'), translate('abcd', 'aba', '12') FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Run some queries where the same character appears several times in the from string (2nd argument) of the UDF -SELECT translate('abcd', 'aba', '123'), +POSTHOOK: query: SELECT translate('abcd', 'aba', '123'), translate('abcd', 'aba', '12') FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: 
default@src #### A masked pattern was here #### 12cd 12cd -PREHOOK: query: -- Run some queries for the ignorant case when the 3rd parameter has more characters than the second one -SELECT translate('abcd', 'abc', '1234') FROM src tablesample (1 rows) +PREHOOK: query: SELECT translate('abcd', 'abc', '1234') FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Run some queries for the ignorant case when the 3rd parameter has more characters than the second one -SELECT translate('abcd', 'abc', '1234') FROM src tablesample (1 rows) +POSTHOOK: query: SELECT translate('abcd', 'abc', '1234') FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 123d -PREHOOK: query: -- Test proper function over UTF-8 characters -SELECT translate('Àbcd', 'À', 'Ã') FROM src tablesample (1 rows) +PREHOOK: query: SELECT translate('Àbcd', 'À', 'Ã') FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Test proper function over UTF-8 characters -SELECT translate('Àbcd', 'À', 'Ã') FROM src tablesample (1 rows) +POSTHOOK: query: SELECT translate('Àbcd', 'À', 'Ã') FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### Ãbcd -PREHOOK: query: -- Run some queries where the arguments are not strings but chars and varchars -SELECT translate(CAST('abcd' AS CHAR(5)), CAST('aba' AS VARCHAR(5)), CAST('123' AS CHAR(5))), +PREHOOK: query: SELECT translate(CAST('abcd' AS CHAR(5)), CAST('aba' AS VARCHAR(5)), CAST('123' AS CHAR(5))), translate(CAST('abcd' AS VARCHAR(9)), CAST('aba' AS CHAR(9)), CAST('12' AS VARCHAR(9))) FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Run some queries where the arguments are not strings but chars and varchars 
-SELECT translate(CAST('abcd' AS CHAR(5)), CAST('aba' AS VARCHAR(5)), CAST('123' AS CHAR(5))), +POSTHOOK: query: SELECT translate(CAST('abcd' AS CHAR(5)), CAST('aba' AS VARCHAR(5)), CAST('123' AS CHAR(5))), translate(CAST('abcd' AS VARCHAR(9)), CAST('aba' AS CHAR(9)), CAST('12' AS VARCHAR(9))) FROM src tablesample (1 rows) POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/udf_trunc.q.out b/ql/src/test/results/clientpositive/udf_trunc.q.out index 36b9a13..7874d52 100644 --- a/ql/src/test/results/clientpositive/udf_trunc.q.out +++ b/ql/src/test/results/clientpositive/udf_trunc.q.out @@ -28,8 +28,7 @@ OK 1234567891 Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFTrunc Function type:BUILTIN -PREHOOK: query: --test string with 'MM' as format -EXPLAIN +PREHOOK: query: EXPLAIN SELECT TRUNC('2014-01-01', 'MM'), TRUNC('2014-01-14', 'MM'), @@ -48,8 +47,7 @@ SELECT TRUNC('2016-02-28 10:30:45', 'MM'), TRUNC('2016-02-29 10:30:45', 'MM') PREHOOK: type: QUERY -POSTHOOK: query: --test string with 'MM' as format -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT TRUNC('2014-01-01', 'MM'), TRUNC('2014-01-14', 'MM'), @@ -127,8 +125,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 2014-01-01 2014-01-01 2014-01-01 2014-02-01 2014-02-01 2016-02-01 2016-02-01 2016-02-01 2014-01-01 2014-01-01 2014-01-01 2014-02-01 2014-02-01 2016-02-01 2016-02-01 2016-02-01 -PREHOOK: query: --test string with 'YEAR' as format -EXPLAIN +PREHOOK: query: EXPLAIN SELECT TRUNC('2014-01-01', 'YEAR'), TRUNC('2014-01-14', 'YEAR'), @@ -147,8 +144,7 @@ SELECT TRUNC('2016-02-28 10:30:45', 'YEAR'), TRUNC('2016-02-29 10:30:45', 'YEAR') PREHOOK: type: QUERY -POSTHOOK: query: --test string with 'YEAR' as format -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT TRUNC('2014-01-01', 'YEAR'), TRUNC('2014-01-14', 'YEAR'), @@ -226,8 +222,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 
2014-01-01 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2016-01-01 2016-01-01 2016-01-01 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2016-01-01 2016-01-01 2016-01-01 -PREHOOK: query: --test timestamp with 'MM' as format -EXPLAIN +PREHOOK: query: EXPLAIN SELECT TRUNC(CAST('2014-01-01 00:00:00' AS TIMESTAMP), 'MM'), TRUNC(CAST('2014-01-14 00:00:00' AS TIMESTAMP), 'MM'), @@ -246,8 +241,7 @@ SELECT TRUNC(CAST('2016-02-28 10:30:45' AS TIMESTAMP), 'MM'), TRUNC(CAST('2016-02-29 10:30:45' AS TIMESTAMP), 'MM') PREHOOK: type: QUERY -POSTHOOK: query: --test timestamp with 'MM' as format -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT TRUNC(CAST('2014-01-01 00:00:00' AS TIMESTAMP), 'MM'), TRUNC(CAST('2014-01-14 00:00:00' AS TIMESTAMP), 'MM'), @@ -325,8 +319,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 2014-01-01 2014-01-01 2014-01-01 2014-02-01 2014-02-01 2016-02-01 2016-02-01 2016-02-01 2014-01-01 2014-01-01 2014-01-01 2014-02-01 2014-02-01 2016-02-01 2016-02-01 2016-02-01 -PREHOOK: query: --test timestamp with 'YEAR' as format -EXPLAIN +PREHOOK: query: EXPLAIN SELECT TRUNC(CAST('2014-01-01 00:00:00' AS TIMESTAMP), 'YEAR'), TRUNC(CAST('2014-01-14 00:00:00' AS TIMESTAMP), 'YEAR'), @@ -345,8 +338,7 @@ SELECT TRUNC(CAST('2016-02-28 10:30:45' AS TIMESTAMP), 'YEAR'), TRUNC(CAST('2016-02-29 10:30:45' AS TIMESTAMP), 'YEAR') PREHOOK: type: QUERY -POSTHOOK: query: --test timestamp with 'YEAR' as format -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT TRUNC(CAST('2014-01-01 00:00:00' AS TIMESTAMP), 'YEAR'), TRUNC(CAST('2014-01-14 00:00:00' AS TIMESTAMP), 'YEAR'), @@ -424,8 +416,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2016-01-01 2016-01-01 2016-01-01 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2016-01-01 2016-01-01 2016-01-01 -PREHOOK: query: --test date with 'MM' as format -EXPLAIN +PREHOOK: 
query: EXPLAIN SELECT TRUNC(CAST('2014-01-01' AS DATE), 'MM'), TRUNC(CAST('2014-01-14' AS DATE), 'MM'), @@ -436,8 +427,7 @@ SELECT TRUNC(CAST('2016-02-28' AS DATE), 'MM'), TRUNC(CAST('2016-02-29' AS DATE), 'MM') PREHOOK: type: QUERY -POSTHOOK: query: --test date with 'MM' as format -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT TRUNC(CAST('2014-01-01' AS DATE), 'MM'), TRUNC(CAST('2014-01-14' AS DATE), 'MM'), @@ -491,8 +481,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 2014-01-01 2014-01-01 2014-01-01 2014-02-01 2014-02-01 2016-02-01 2016-02-01 2016-02-01 -PREHOOK: query: --test date with 'YEAR' as format -EXPLAIN +PREHOOK: query: EXPLAIN SELECT TRUNC(CAST('2014-01-01' AS DATE), 'YEAR'), TRUNC(CAST('2014-01-14' AS DATE), 'YEAR'), @@ -503,8 +492,7 @@ SELECT TRUNC(CAST('2016-02-28' AS DATE), 'YEAR'), TRUNC(CAST('2016-02-29' AS DATE), 'YEAR') PREHOOK: type: QUERY -POSTHOOK: query: --test date with 'YEAR' as format -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT TRUNC(CAST('2014-01-01' AS DATE), 'YEAR'), TRUNC(CAST('2014-01-14' AS DATE), 'YEAR'), @@ -558,8 +546,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2014-01-01 2016-01-01 2016-01-01 2016-01-01 -PREHOOK: query: --test misc with 'MM' as format -EXPLAIN +PREHOOK: query: EXPLAIN SELECT TRUNC('2014-01-34', 'MM'), TRUNC(CAST(null AS STRING), 'MM'), @@ -568,8 +555,7 @@ SELECT TRUNC('2014-01-01', 'M'), TRUNC('2014-01-01', CAST(null AS STRING)) PREHOOK: type: QUERY -POSTHOOK: query: --test misc with 'MM' as format -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT TRUNC('2014-01-34', 'MM'), TRUNC(CAST(null AS STRING), 'MM'), @@ -617,8 +603,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### 2014-02-01 NULL NULL NULL NULL NULL -PREHOOK: query: --test misc with 'YEAR' as format -EXPLAIN +PREHOOK: query: EXPLAIN SELECT 
TRUNC('2014-01-34', 'YEAR'), TRUNC(CAST(null AS STRING), 'YEAR'), @@ -627,8 +612,7 @@ SELECT TRUNC('2014-01-01', 'M'), TRUNC('2014-01-01', CAST(null AS STRING)) PREHOOK: type: QUERY -POSTHOOK: query: --test misc with 'YEAR' as format -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT TRUNC('2014-01-34', 'YEAR'), TRUNC(CAST(null AS STRING), 'YEAR'), diff --git a/ql/src/test/results/clientpositive/udf_unhex.q.out b/ql/src/test/results/clientpositive/udf_unhex.q.out index 86e2c1b..9b7c626 100644 --- a/ql/src/test/results/clientpositive/udf_unhex.q.out +++ b/ql/src/test/results/clientpositive/udf_unhex.q.out @@ -23,9 +23,7 @@ any nonhexadecimal digits in the argument, it returns NULL. Also, if there are an odd number of characters a leading 0 is appended. Function class:org.apache.hadoop.hive.ql.udf.UDFUnhex Function type:BUILTIN -PREHOOK: query: -- Good inputs - -SELECT +PREHOOK: query: SELECT unhex('4D7953514C'), unhex('31323637'), unhex('61'), @@ -35,9 +33,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Good inputs - -SELECT +POSTHOOK: query: SELECT unhex('4D7953514C'), unhex('31323637'), unhex('61'), @@ -48,8 +44,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### MySQL 1267 a -4 -PREHOOK: query: -- Bad inputs -SELECT +PREHOOK: query: SELECT unhex('MySQL'), unhex('G123'), unhex('\0') @@ -57,8 +52,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Bad inputs -SELECT +POSTHOOK: query: SELECT unhex('MySQL'), unhex('G123'), unhex('\0') diff --git a/ql/src/test/results/clientpositive/udf_version.q.out b/ql/src/test/results/clientpositive/udf_version.q.out index 242e83f..7389b2b 100644 --- a/ql/src/test/results/clientpositive/udf_version.q.out +++ b/ql/src/test/results/clientpositive/udf_version.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- Normalize the version 
info -SELECT regexp_replace(version(), '.+ r\\w+', 'VERSION rGITHASH') +PREHOOK: query: SELECT regexp_replace(version(), '.+ r\\w+', 'VERSION rGITHASH') PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### -POSTHOOK: query: -- Normalize the version info -SELECT regexp_replace(version(), '.+ r\\w+', 'VERSION rGITHASH') +POSTHOOK: query: SELECT regexp_replace(version(), '.+ r\\w+', 'VERSION rGITHASH') POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_when.q.out b/ql/src/test/results/clientpositive/udf_when.q.out index f2d7eda..a05bd13 100644 --- a/ql/src/test/results/clientpositive/udf_when.q.out +++ b/ql/src/test/results/clientpositive/udf_when.q.out @@ -155,8 +155,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 2 9 14 NULL 24 NULL -PREHOOK: query: -- Allow compatible types to be used in return value -SELECT CASE +PREHOOK: query: SELECT CASE WHEN 1=1 THEN 123.0BD ELSE 0.0BD END, @@ -174,8 +173,7 @@ FROM src tablesample (1 rows) PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Allow compatible types to be used in return value -SELECT CASE +POSTHOOK: query: SELECT CASE WHEN 1=1 THEN 123.0BD ELSE 0.0BD END, diff --git a/ql/src/test/results/clientpositive/udtf_explode.q.out b/ql/src/test/results/clientpositive/udtf_explode.q.out index 4892cd5..8f68b35 100644 --- a/ql/src/test/results/clientpositive/udtf_explode.q.out +++ b/ql/src/test/results/clientpositive/udtf_explode.q.out @@ -542,13 +542,11 @@ POSTHOOK: Input: default@src 238 1 one 238 2 two 238 3 three -PREHOOK: query: -- HIVE-4295 -SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3 +PREHOOK: query: SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src 
lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- HIVE-4295 -SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3 +POSTHOOK: query: SELECT BLOCK__OFFSET__INSIDE__FILE, src.key, myKey, myVal FROM src lateral view explode(map(1,'one',2,'two',3,'three')) x AS myKey,myVal LIMIT 3 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udtf_json_tuple.q.out b/ql/src/test/results/clientpositive/udtf_json_tuple.q.out index 5be6eb0..44fd804 100644 --- a/ql/src/test/results/clientpositive/udtf_json_tuple.q.out +++ b/ql/src/test/results/clientpositive/udtf_json_tuple.q.out @@ -405,15 +405,11 @@ POSTHOOK: Input: default@json_t NULL 1 2 2 value2 1 -PREHOOK: query: -- Verify that json_tuple can handle new lines in JSON values - -CREATE TABLE dest1(c1 STRING) STORED AS RCFILE +PREHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS RCFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dest1 -POSTHOOK: query: -- Verify that json_tuple can handle new lines in JSON values - -CREATE TABLE dest1(c1 STRING) STORED AS RCFILE +POSTHOOK: query: CREATE TABLE dest1(c1 STRING) STORED AS RCFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dest1 diff --git a/ql/src/test/results/clientpositive/udtf_nofetchtask.q.out b/ql/src/test/results/clientpositive/udtf_nofetchtask.q.out index 86929ea..b90f515 100644 --- a/ql/src/test/results/clientpositive/udtf_nofetchtask.q.out +++ b/ql/src/test/results/clientpositive/udtf_nofetchtask.q.out @@ -4,25 +4,21 @@ PREHOOK: Output: udtfcount2 POSTHOOK: query: create temporary function udtfCount2 as 'org.apache.hadoop.hive.contrib.udtf.example.GenericUDTFCount2' POSTHOOK: type: 
CREATEFUNCTION POSTHOOK: Output: udtfcount2 -PREHOOK: query: -- Correct output should be 2 rows -select udtfCount2() from src +PREHOOK: query: select udtfCount2() from src PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Correct output should be 2 rows -select udtfCount2() from src +POSTHOOK: query: select udtfCount2() from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 500 500 -PREHOOK: query: -- Should still have the same output with fetch task conversion enabled -select udtfCount2() from src +PREHOOK: query: select udtfCount2() from src PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Should still have the same output with fetch task conversion enabled -select udtfCount2() from src +POSTHOOK: query: select udtfCount2() from src POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out b/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out index bbab029..51e23e5 100644 --- a/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out +++ b/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out @@ -214,12 +214,10 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL facebook.com /path1/p.php k1=v1&k2=v2 Ref1 http /path1/p.php?k1=v1&k2=v2 facebook.com NULL v1 sites.google.com /a/example.com/site/page NULL NULL ftp /a/example.com/site/page sites.google.com NULL NULL www.socs.uts.edu.au /MosaicDocs-old/url-primer.html k1=tps chapter1 https /MosaicDocs-old/url-primer.html?k1=tps www.socs.uts.edu.au:80 NULL tps -PREHOOK: query: -- should return null for 'host', 'query', 'QUERY:nonExistCol' -explain +PREHOOK: query: explain select a.key, b.ho, b.qu, b.qk1, b.err1, b.err2, b.err3 from url_t a lateral view parse_url_tuple(a.fullurl, 'HOST', 'PATH', 'QUERY', 'REF', 'PROTOCOL', 'FILE', 'AUTHORITY', 
'USERINFO', 'QUERY:k1', 'host', 'query', 'QUERY:nonExistCol') b as ho, pa, qu, re, pr, fi, au, us, qk1, err1, err2, err3 order by a.key PREHOOK: type: QUERY -POSTHOOK: query: -- should return null for 'host', 'query', 'QUERY:nonExistCol' -explain +POSTHOOK: query: explain select a.key, b.ho, b.qu, b.qk1, b.err1, b.err2, b.err3 from url_t a lateral view parse_url_tuple(a.fullurl, 'HOST', 'PATH', 'QUERY', 'REF', 'PROTOCOL', 'FILE', 'AUTHORITY', 'USERINFO', 'QUERY:k1', 'host', 'query', 'QUERY:nonExistCol') b as ho, pa, qu, re, pr, fi, au, us, qk1, err1, err2, err3 order by a.key POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/unicode_notation.q.out b/ql/src/test/results/clientpositive/unicode_notation.q.out index 37848b0..506fc75 100644 --- a/ql/src/test/results/clientpositive/unicode_notation.q.out +++ b/ql/src/test/results/clientpositive/unicode_notation.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- HIVE-4618 hive should accept unicode notation like \uxxxx - -CREATE TABLE k1( a string)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001' +PREHOOK: query: CREATE TABLE k1( a string)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@k1 -POSTHOOK: query: -- HIVE-4618 hive should accept unicode notation like \uxxxx - -CREATE TABLE k1( a string)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001' +POSTHOOK: query: CREATE TABLE k1( a string)ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@k1 diff --git a/ql/src/test/results/clientpositive/union.q.out b/ql/src/test/results/clientpositive/union.q.out index b00b0b6..67c9ba4 100644 --- a/ql/src/test/results/clientpositive/union.q.out +++ b/ql/src/test/results/clientpositive/union.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map jobs on same input, followed by filesink - 
-EXPLAIN +PREHOOK: query: EXPLAIN FROM ( FROM src select src.key, src.value WHERE src.key < 100 UNION ALL @@ -9,10 +6,7 @@ FROM ( ) unioninput INSERT OVERWRITE DIRECTORY 'target/warehouse/union.out' SELECT unioninput.* PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map jobs on same input, followed by filesink - -EXPLAIN +POSTHOOK: query: EXPLAIN FROM ( FROM src select src.key, src.value WHERE src.key < 100 UNION ALL diff --git a/ql/src/test/results/clientpositive/union10.q.out b/ql/src/test/results/clientpositive/union10.q.out index 57dc13a..85eabf5 100644 --- a/ql/src/test/results/clientpositive/union10.q.out +++ b/ql/src/test/results/clientpositive/union10.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by filesink - -create table tmptable(key string, value int) +PREHOOK: query: create table tmptable(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by filesink - -create table tmptable(key string, value int) +POSTHOOK: query: create table tmptable(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/union11.q.out b/ql/src/test/results/clientpositive/union11.q.out index 32c7f09..99944d7 100644 --- a/ql/src/test/results/clientpositive/union11.q.out +++ b/ql/src/test/results/clientpositive/union11.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, 
count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2 UNION ALL select 'tst3' as key, count(1) as value from src s3) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-reduce jobs, 3 way union, same input for all sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2 diff --git a/ql/src/test/results/clientpositive/union12.q.out b/ql/src/test/results/clientpositive/union12.q.out index ee56f15..1b02d16 100644 --- a/ql/src/test/results/clientpositive/union12.q.out +++ b/ql/src/test/results/clientpositive/union12.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case: all subqueries are a map-reduce jobs, 3 way union, different inputs for all sub-queries, followed by filesink - -create table tmptable(key string, value int) +PREHOOK: query: create table tmptable(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case: all subqueries are a map-reduce jobs, 3 way union, different inputs for all sub-queries, followed by filesink - -create table tmptable(key string, value int) +POSTHOOK: query: create table tmptable(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/union13.q.out b/ql/src/test/results/clientpositive/union13.q.out index 616892a..31219d4 100644 --- a/ql/src/test/results/clientpositive/union13.q.out +++ b/ql/src/test/results/clientpositive/union13.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are a map-only jobs, same input, followed by filesink - -explain +PREHOOK: query: 
explain select unionsrc.key, unionsrc.value FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are a map-only jobs, same input, followed by filesink - -explain +POSTHOOK: query: explain select unionsrc.key, unionsrc.value FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/union14.q.out b/ql/src/test/results/clientpositive/union14.q.out index e94f5f3..9c59283 100644 --- a/ql/src/test/results/clientpositive/union14.q.out +++ b/ql/src/test/results/clientpositive/union14.q.out @@ -1,16 +1,10 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select s2.key as key, s2.value as value from src1 s2 UNION ALL select 'tst1' as key, cast(count(1) as string) as value from src s1) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select s2.key as key, s2.value as value from src1 s2 UNION ALL select 'tst1' as key, cast(count(1) as string) as value from src s1) diff --git a/ql/src/test/results/clientpositive/union15.q.out b/ql/src/test/results/clientpositive/union15.q.out index f1722e7..323e099 100644 --- a/ql/src/test/results/clientpositive/union15.q.out +++ b/ql/src/test/results/clientpositive/union15.q.out @@ -1,17 +1,11 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +PREHOOK: 
query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2 UNION ALL select s3.key as key, s3.value as value from src1 s3) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2 diff --git a/ql/src/test/results/clientpositive/union16.q.out b/ql/src/test/results/clientpositive/union16.q.out index f14cf8a..35bad61 100644 --- a/ql/src/test/results/clientpositive/union16.q.out +++ b/ql/src/test/results/clientpositive/union16.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) FROM ( SELECT key, value FROM src UNION ALL SELECT key, value FROM src UNION ALL @@ -31,8 +30,7 @@ SELECT count(1) FROM ( SELECT key, value FROM src UNION ALL SELECT key, value FROM src) src PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) FROM ( SELECT key, value FROM src UNION ALL SELECT key, value FROM src UNION ALL diff --git a/ql/src/test/results/clientpositive/union17.q.out b/ql/src/test/results/clientpositive/union17.q.out index 650aef4..1cf760d 100644 --- a/ql/src/test/results/clientpositive/union17.q.out +++ b/ql/src/test/results/clientpositive/union17.q.out @@ -14,20 +14,14 @@ POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST2 -PREHOOK: query: -- SORT_QUERY_RESULTS --- union case:map-reduce sub-queries followed by multi-table insert - -explain +PREHOOK: query: explain FROM (select 'tst1' as 
key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, COUNT(DISTINCT SUBSTR(unionsrc.value,5)) GROUP BY unionsrc.key, unionsrc.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- union case:map-reduce sub-queries followed by multi-table insert - -explain +POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc diff --git a/ql/src/test/results/clientpositive/union18.q.out b/ql/src/test/results/clientpositive/union18.q.out index 5993280..f38ddb3 100644 --- a/ql/src/test/results/clientpositive/union18.q.out +++ b/ql/src/test/results/clientpositive/union18.q.out @@ -14,22 +14,14 @@ POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST2 -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case:map-reduce sub-queries followed by multi-table insert - -explain +PREHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, unionsrc.value INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case:map-reduce sub-queries followed by multi-table insert - -explain +POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc diff --git a/ql/src/test/results/clientpositive/union19.q.out 
b/ql/src/test/results/clientpositive/union19.q.out index 89d9c6d..839a05f 100644 --- a/ql/src/test/results/clientpositive/union19.q.out +++ b/ql/src/test/results/clientpositive/union19.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +PREHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@DEST1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE +POSTHOOK: query: CREATE TABLE DEST1(key STRING, value STRING) STORED AS TEXTFILE POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST1 @@ -18,18 +14,14 @@ POSTHOOK: query: CREATE TABLE DEST2(key STRING, val1 STRING, val2 STRING) STORED POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@DEST2 -PREHOOK: query: -- union case:map-reduce sub-queries followed by multi-table insert - -explain +PREHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc INSERT OVERWRITE TABLE DEST1 SELECT unionsrc.key, count(unionsrc.value) group by unionsrc.key INSERT OVERWRITE TABLE DEST2 SELECT unionsrc.key, unionsrc.value, unionsrc.value PREHOOK: type: QUERY -POSTHOOK: query: -- union case:map-reduce sub-queries followed by multi-table insert - -explain +POSTHOOK: query: explain FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc diff --git a/ql/src/test/results/clientpositive/union2.q.out b/ql/src/test/results/clientpositive/union2.q.out index 7539ae6..cf75125 100644 --- a/ql/src/test/results/clientpositive/union2.q.out +++ b/ql/src/test/results/clientpositive/union2.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- 
SORT_BEFORE_DIFF --- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink - -explain +PREHOOK: query: explain select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink - -explain +POSTHOOK: query: explain select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2) unionsrc POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/union20.q.out b/ql/src/test/results/clientpositive/union20.q.out index 5731432..ae55a8b 100644 --- a/ql/src/test/results/clientpositive/union20.q.out +++ b/ql/src/test/results/clientpositive/union20.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- union :map-reduce sub-queries followed by join - -explain +PREHOOK: query: explain SELECT unionsrc1.key, unionsrc1.value, unionsrc2.key, unionsrc2.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL @@ -12,10 +9,7 @@ JOIN select s4.key as key, s4.value as value from src s4 where s4.key < 10) unionsrc2 ON (unionsrc1.key = unionsrc2.key) PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- union :map-reduce sub-queries followed by join - -explain +POSTHOOK: query: explain SELECT unionsrc1.key, unionsrc1.value, unionsrc2.key, unionsrc2.value FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL diff --git a/ql/src/test/results/clientpositive/union21.q.out b/ql/src/test/results/clientpositive/union21.q.out index 85b8245..02a09a3 100644 --- a/ql/src/test/results/clientpositive/union21.q.out +++ b/ql/src/test/results/clientpositive/union21.q.out @@ -1,7 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- union of constants, udf outputs, and columns from 
text table and thrift table - -explain +PREHOOK: query: explain SELECT key, count(1) FROM ( SELECT '1' as key from src @@ -16,10 +13,7 @@ FROM ( ) union_output GROUP BY key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS --- union of constants, udf outputs, and columns from text table and thrift table - -explain +POSTHOOK: query: explain SELECT key, count(1) FROM ( SELECT '1' as key from src diff --git a/ql/src/test/results/clientpositive/union22.q.out b/ql/src/test/results/clientpositive/union22.q.out index f075148..9e44492 100644 --- a/ql/src/test/results/clientpositive/union22.q.out +++ b/ql/src/test/results/clientpositive/union22.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string) +PREHOOK: query: create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@dst_union22 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string) +POSTHOOK: query: create table dst_union22(k1 string, k2 string, k3 string, k4 string) partitioned by (ds string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@dst_union22 @@ -48,9 +44,7 @@ POSTHOOK: Lineage: dst_union22_delta PARTITION(ds=1).k2 SIMPLE [(src)src.FieldSc POSTHOOK: Lineage: dst_union22_delta PARTITION(ds=1).k3 SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: dst_union22_delta PARTITION(ds=1).k4 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: dst_union22_delta PARTITION(ds=1).k5 SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -explain extended +PREHOOK: 
query: explain extended insert overwrite table dst_union22 partition (ds='2') select * from ( @@ -63,9 +57,7 @@ where a.k1 > 20 ) subq PREHOOK: type: QUERY -POSTHOOK: query: -- Since the inputs are small, it should be automatically converted to mapjoin - -explain extended +POSTHOOK: query: explain extended insert overwrite table dst_union22 partition (ds='2') select * from ( diff --git a/ql/src/test/results/clientpositive/union24.q.out b/ql/src/test/results/clientpositive/union24.q.out index 91b582f..ef3a224 100644 --- a/ql/src/test/results/clientpositive/union24.q.out +++ b/ql/src/test/results/clientpositive/union24.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table src2 as select key, count(1) as count from src group by key +PREHOOK: query: create table src2 as select key, count(1) as count from src group by key PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@src2 -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table src2 as select key, count(1) as count from src group by key +POSTHOOK: query: create table src2 as select key, count(1) as count from src group by key POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/union26.q.out b/ql/src/test/results/clientpositive/union26.q.out index b335433..99f4421 100644 --- a/ql/src/test/results/clientpositive/union26.q.out +++ b/ql/src/test/results/clientpositive/union26.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT count(1) as counts, key, @@ -22,9 +20,7 @@ WHERE ds='2008-04-08' and hr='11' ) a group by key, value PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT count(1) as counts, key, diff --git a/ql/src/test/results/clientpositive/union27.q.out b/ql/src/test/results/clientpositive/union27.q.out index 
f023360..34b46f1 100644 --- a/ql/src/test/results/clientpositive/union27.q.out +++ b/ql/src/test/results/clientpositive/union27.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -create table jackson_sev_same as select * from src +PREHOOK: query: create table jackson_sev_same as select * from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@jackson_sev_same -POSTHOOK: query: -- SORT_QUERY_RESULTS -create table jackson_sev_same as select * from src +POSTHOOK: query: create table jackson_sev_same as select * from src POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/union3.q.out b/ql/src/test/results/clientpositive/union3.q.out index 730c8b8..57344af 100644 --- a/ql/src/test/results/clientpositive/union3.q.out +++ b/ql/src/test/results/clientpositive/union3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -explain +PREHOOK: query: explain SELECT * FROM ( SELECT 1 AS id @@ -17,9 +15,7 @@ FROM ( CLUSTER BY id ) a PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -explain +POSTHOOK: query: explain SELECT * FROM ( SELECT 1 AS id diff --git a/ql/src/test/results/clientpositive/union31.q.out b/ql/src/test/results/clientpositive/union31.q.out index bb35d5c..ed1d2e2 100644 --- a/ql/src/test/results/clientpositive/union31.q.out +++ b/ql/src/test/results/clientpositive/union31.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table t1 +PREHOOK: query: drop table t1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table t1 +POSTHOOK: query: drop table t1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table t2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/union32.q.out b/ql/src/test/results/clientpositive/union32.q.out index 73d9340..f0b2087 100644 --- 
a/ql/src/test/results/clientpositive/union32.q.out +++ b/ql/src/test/results/clientpositive/union32.q.out @@ -1,19 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- This tests various union queries which have columns on one side of the query --- being of double type and those on the other side another - -CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 +PREHOOK: query: CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- This tests various union queries which have columns on one side of the query --- being of double type and those on the other side another - -CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 +POSTHOOK: query: CREATE TABLE t1 AS SELECT * FROM src WHERE key < 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@src POSTHOOK: Output: database:default @@ -32,15 +22,13 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@t2 POSTHOOK: Lineage: t2.key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] POSTHOOK: Lineage: t2.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- Test simple union with double -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key FROM t1 UNION ALL SELECT CAST(key AS BIGINT) AS key FROM t2) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test simple union with double -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key FROM t1 UNION ALL @@ -129,15 +117,13 @@ POSTHOOK: Input: default@t2 8.0 9.0 9.0 -PREHOOK: query: -- Test union with join on the left -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key UNION ALL SELECT CAST(key AS DOUBLE) AS key FROM t2) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test union with join on the left -EXPLAIN 
+POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key UNION ALL @@ -286,15 +272,13 @@ POSTHOOK: Input: default@t2 8.0 9.0 9.0 -PREHOOK: query: -- Test union with join on the right -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key FROM t2 UNION ALL SELECT CAST(a.key AS BIGINT) AS key FROM t1 a JOIN t2 b ON a.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test union with join on the right -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key FROM t2 UNION ALL @@ -443,15 +427,13 @@ POSTHOOK: Input: default@t2 8.0 9.0 9.0 -PREHOOK: query: -- Test union with join on the left selecting multiple columns -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key UNION ALL SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test union with join on the left selecting multiple columns -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key UNION ALL @@ -600,15 +582,13 @@ POSTHOOK: Input: default@t2 8.0 8 9.0 9 9.0 9 -PREHOOK: query: -- Test union with join on the right selecting multiple columns -EXPLAIN +PREHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 UNION ALL SELECT CAST(a.key AS BIGINT) AS key, CAST(b.key AS CHAR(20)) AS value FROM t1 a JOIN t2 b ON a.key = b.key) a PREHOOK: type: QUERY -POSTHOOK: query: -- Test union with join on the right selecting multiple columns -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT * FROM (SELECT CAST(key AS DOUBLE) AS key, CAST(key AS STRING) AS value FROM t2 UNION ALL diff --git a/ql/src/test/results/clientpositive/union33.q.out b/ql/src/test/results/clientpositive/union33.q.out index 
f8a6e00..17aeecd 100644 --- a/ql/src/test/results/clientpositive/union33.q.out +++ b/ql/src/test/results/clientpositive/union33.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- This tests that a union all with a map only subquery on one side and a --- subquery involving two map reduce jobs on the other runs correctly. - -CREATE TABLE test_src (key STRING, value STRING) +PREHOOK: query: CREATE TABLE test_src (key STRING, value STRING) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_src -POSTHOOK: query: -- SORT_BEFORE_DIFF --- This tests that a union all with a map only subquery on one side and a --- subquery involving two map reduce jobs on the other runs correctly. - -CREATE TABLE test_src (key STRING, value STRING) +POSTHOOK: query: CREATE TABLE test_src (key STRING, value STRING) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_src diff --git a/ql/src/test/results/clientpositive/union34.q.out b/ql/src/test/results/clientpositive/union34.q.out index 8d7846c..9d59331 100644 --- a/ql/src/test/results/clientpositive/union34.q.out +++ b/ql/src/test/results/clientpositive/union34.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -create table src10_1 (key string, value string) +PREHOOK: query: create table src10_1 (key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@src10_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS -create table src10_1 (key string, value string) +POSTHOOK: query: create table src10_1 (key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@src10_1 @@ -62,18 +60,14 @@ POSTHOOK: Lineage: src10_3.key SIMPLE [(src)src.FieldSchema(name:key, type:strin POSTHOOK: Lineage: src10_3.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] POSTHOOK: Lineage: src10_4.key SIMPLE [(src)src.FieldSchema(name:key, 
type:string, comment:default), ] POSTHOOK: Lineage: src10_4.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] -PREHOOK: query: -- When we convert the Join of sub1 and sub0 into a MapJoin, --- we can use a single MR job to evaluate this entire query. -explain +PREHOOK: query: explain SELECT * FROM ( SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) UNION ALL SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 ) alias1 PREHOOK: type: QUERY -POSTHOOK: query: -- When we convert the Join of sub1 and sub0 into a MapJoin, --- we can use a single MR job to evaluate this entire query. -explain +POSTHOOK: query: explain SELECT * FROM ( SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) UNION ALL @@ -232,22 +226,14 @@ POSTHOOK: Input: default@src10_4 98 val_98 98 val_98 98 val_98 -PREHOOK: query: -- When we do not convert the Join of sub1 and sub0 into a MapJoin, --- we need to use two MR jobs to evaluate this query. --- The first job is for the Join of sub1 and sub2. The second job --- is for the UNION ALL and ORDER BY. -explain +PREHOOK: query: explain SELECT * FROM ( SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) UNION ALL SELECT key,value FROM (SELECT * FROM (SELECT * FROM src10_3) sub2 UNION ALL SELECT * FROM src10_4 ) alias0 ) alias1 PREHOOK: type: QUERY -POSTHOOK: query: -- When we do not convert the Join of sub1 and sub0 into a MapJoin, --- we need to use two MR jobs to evaluate this query. --- The first job is for the Join of sub1 and sub2. The second job --- is for the UNION ALL and ORDER BY. 
-explain +POSTHOOK: query: explain SELECT * FROM ( SELECT sub1.key,sub1.value FROM (SELECT * FROM src10_1) sub1 JOIN (SELECT * FROM src10_2) sub0 ON (sub0.key = sub1.key) UNION ALL diff --git a/ql/src/test/results/clientpositive/union36.q.out b/ql/src/test/results/clientpositive/union36.q.out index e12590c..f1ed2cf 100644 --- a/ql/src/test/results/clientpositive/union36.q.out +++ b/ql/src/test/results/clientpositive/union36.q.out @@ -1,10 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -select (x/sum(x) over()) as y from(select cast(1 as decimal(10,0)) as x from (select * from src limit 2)s1 union all select cast(1 as decimal(10,0)) x from (select * from src limit 2) s2 union all select cast('100000000' as decimal(10,0)) x from (select * from src limit 2) s3)u +PREHOOK: query: select (x/sum(x) over()) as y from(select cast(1 as decimal(10,0)) as x from (select * from src limit 2)s1 union all select cast(1 as decimal(10,0)) x from (select * from src limit 2) s2 union all select cast('100000000' as decimal(10,0)) x from (select * from src limit 2) s3)u PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS -select (x/sum(x) over()) as y from(select cast(1 as decimal(10,0)) as x from (select * from src limit 2)s1 union all select cast(1 as decimal(10,0)) x from (select * from src limit 2) s2 union all select cast('100000000' as decimal(10,0)) x from (select * from src limit 2) s3)u +POSTHOOK: query: select (x/sum(x) over()) as y from(select cast(1 as decimal(10,0)) as x from (select * from src limit 2)s1 union all select cast(1 as decimal(10,0)) x from (select * from src limit 2) s2 union all select cast('100000000' as decimal(10,0)) x from (select * from src limit 2) s3)u POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union4.q.out b/ql/src/test/results/clientpositive/union4.q.out index 6ecc0a7..0821589 100644 --- 
a/ql/src/test/results/clientpositive/union4.q.out +++ b/ql/src/test/results/clientpositive/union4.q.out @@ -1,18 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case: both subqueries are map-reduce jobs on same input, followed by filesink - - -create table tmptable(key string, value int) +PREHOOK: query: create table tmptable(key string, value int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case: both subqueries are map-reduce jobs on same input, followed by filesink - - -create table tmptable(key string, value int) +POSTHOOK: query: create table tmptable(key string, value int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/union5.q.out b/ql/src/test/results/clientpositive/union5.q.out index 916bb74..8cc52f3 100644 --- a/ql/src/test/results/clientpositive/union5.q.out +++ b/ql/src/test/results/clientpositive/union5.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: both subqueries are map-reduce jobs on same input, followed by reduce sink - -explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, count(1) as value from src s1 UNION ALL select 'tst2' as key, count(1) as value from src s2) unionsrc group by unionsrc.key diff --git a/ql/src/test/results/clientpositive/union6.q.out b/ql/src/test/results/clientpositive/union6.q.out index 0844165..8448d86 100644 --- a/ql/src/test/results/clientpositive/union6.q.out +++ 
b/ql/src/test/results/clientpositive/union6.q.out @@ -1,16 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink - -create table tmptable(key string, value string) +PREHOOK: query: create table tmptable(key string, value string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@tmptable -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by filesink - -create table tmptable(key string, value string) +POSTHOOK: query: create table tmptable(key string, value string) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@tmptable diff --git a/ql/src/test/results/clientpositive/union7.q.out b/ql/src/test/results/clientpositive/union7.q.out index 5c1786b..6eb4036 100644 --- a/ql/src/test/results/clientpositive/union7.q.out +++ b/ql/src/test/results/clientpositive/union7.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc group by unionsrc.key PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: 1 subquery is a map-reduce job, different inputs for sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select unionsrc.key, count(1) FROM (select 'tst1' as key, cast(count(1) as string) as value from src s1 UNION ALL select s2.key as key, s2.value as value from src1 s2) unionsrc group by unionsrc.key diff --git a/ql/src/test/results/clientpositive/union8.q.out b/ql/src/test/results/clientpositive/union8.q.out index d54bc04..f58d54a 100644 --- 
a/ql/src/test/results/clientpositive/union8.q.out +++ b/ql/src/test/results/clientpositive/union8.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-only jobs, 3 way union, same input for all sub-queries, followed by filesink - -explain +PREHOOK: query: explain select unionsrc.key, unionsrc.value FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2 UNION ALL select s3.key as key, s3.value as value from src s3) unionsrc PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-only jobs, 3 way union, same input for all sub-queries, followed by filesink - -explain +POSTHOOK: query: explain select unionsrc.key, unionsrc.value FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2 UNION ALL select s3.key as key, s3.value as value from src s3) unionsrc diff --git a/ql/src/test/results/clientpositive/union9.q.out b/ql/src/test/results/clientpositive/union9.q.out index 9faca7f..7e3b7d4 100644 --- a/ql/src/test/results/clientpositive/union9.q.out +++ b/ql/src/test/results/clientpositive/union9.q.out @@ -1,15 +1,9 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-only jobs, 3 way union, same input for all sub-queries, followed by reducesink - -explain +PREHOOK: query: explain select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src s2 UNION ALL select s3.key as key, s3.value as value from src s3) unionsrc PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_BEFORE_DIFF --- union case: all subqueries are a map-only jobs, 3 way union, same input for all sub-queries, followed by reducesink - -explain +POSTHOOK: query: explain select count(1) FROM (select s1.key as key, s1.value as value from src s1 UNION ALL select s2.key as key, s2.value as value from src 
s2 UNION ALL select s3.key as key, s3.value as value from src s3) unionsrc diff --git a/ql/src/test/results/clientpositive/union_date.q.out b/ql/src/test/results/clientpositive/union_date.q.out index 7ac5c1c..d5a24d5 100644 --- a/ql/src/test/results/clientpositive/union_date.q.out +++ b/ql/src/test/results/clientpositive/union_date.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table union_date_1 +PREHOOK: query: drop table union_date_1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table union_date_1 +POSTHOOK: query: drop table union_date_1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table union_date_2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/union_date_trim.q.out b/ql/src/test/results/clientpositive/union_date_trim.q.out index daa7987..a51a5ac 100644 --- a/ql/src/test/results/clientpositive/union_date_trim.q.out +++ b/ql/src/test/results/clientpositive/union_date_trim.q.out @@ -40,13 +40,11 @@ POSTHOOK: Input: default@src POSTHOOK: Output: default@testdate POSTHOOK: Lineage: testdate.dt EXPRESSION [] POSTHOOK: Lineage: testdate.id SIMPLE [] -PREHOOK: query: --- without the fix following query will throw HiveException: Incompatible types for union operator -insert into table testDate select id, tm from (select id, dt as tm from testDate where id = 1 union all select id, dt as tm from testDate where id = 2 union all select id, cast(trim(Cast (dt as string)) as date) as tm from testDate where id = 3 ) a +PREHOOK: query: insert into table testDate select id, tm from (select id, dt as tm from testDate where id = 1 union all select id, dt as tm from testDate where id = 2 union all select id, cast(trim(Cast (dt as string)) as date) as tm from testDate where id = 3 ) a PREHOOK: type: QUERY PREHOOK: Input: default@testdate PREHOOK: Output: default@testdate -POSTHOOK: query: --- without the fix following query will throw HiveException: Incompatible types for union operator -insert into 
table testDate select id, tm from (select id, dt as tm from testDate where id = 1 union all select id, dt as tm from testDate where id = 2 union all select id, cast(trim(Cast (dt as string)) as date) as tm from testDate where id = 3 ) a +POSTHOOK: query: insert into table testDate select id, tm from (select id, dt as tm from testDate where id = 1 union all select id, dt as tm from testDate where id = 2 union all select id, cast(trim(Cast (dt as string)) as date) as tm from testDate where id = 3 ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@testdate POSTHOOK: Output: default@testdate diff --git a/ql/src/test/results/clientpositive/union_null.q.out b/ql/src/test/results/clientpositive/union_null.q.out index 519d40f..e196ff3 100644 --- a/ql/src/test/results/clientpositive/union_null.q.out +++ b/ql/src/test/results/clientpositive/union_null.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_BEFORE_DIFF - --- HIVE-2901 -select x from (select * from (select value as x from src order by x limit 5)a union all select * from (select cast(NULL as string) as x from src limit 5)b )a +PREHOOK: query: select x from (select * from (select value as x from src order by x limit 5)a union all select * from (select cast(NULL as string) as x from src limit 5)b )a PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_BEFORE_DIFF - --- HIVE-2901 -select x from (select * from (select value as x from src order by x limit 5)a union all select * from (select cast(NULL as string) as x from src limit 5)b )a +POSTHOOK: query: select x from (select * from (select value as x from src order by x limit 5)a union all select * from (select cast(NULL as string) as x from src limit 5)b )a POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### @@ -40,13 +34,11 @@ val_0 val_0 val_10 val_100 -PREHOOK: query: -- HIVE-4837 -select * from (select * from (select cast(null as string) as N from src1 group by key)a UNION 
ALL select * from (select cast(null as string) as N from src1 group by key)b ) a +PREHOOK: query: select * from (select * from (select cast(null as string) as N from src1 group by key)a UNION ALL select * from (select cast(null as string) as N from src1 group by key)b ) a PREHOOK: type: QUERY PREHOOK: Input: default@src1 #### A masked pattern was here #### -POSTHOOK: query: -- HIVE-4837 -select * from (select * from (select cast(null as string) as N from src1 group by key)a UNION ALL select * from (select cast(null as string) as N from src1 group by key)b ) a +POSTHOOK: query: select * from (select * from (select cast(null as string) as N from src1 group by key)a UNION ALL select * from (select cast(null as string) as N from src1 group by key)b ) a POSTHOOK: type: QUERY POSTHOOK: Input: default@src1 #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/union_paren.q.out b/ql/src/test/results/clientpositive/union_paren.q.out index 3976ac5..6bfd427 100644 --- a/ql/src/test/results/clientpositive/union_paren.q.out +++ b/ql/src/test/results/clientpositive/union_paren.q.out @@ -217,9 +217,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 309 -PREHOOK: query: --similar tpcds q14 - -with cross_items as +PREHOOK: query: with cross_items as (select key, k from src, (select iss.key k @@ -234,9 +232,7 @@ select * from cross_items order by key limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: --similar tpcds q14 - -with cross_items as +POSTHOOK: query: with cross_items as (select key, k from src, (select iss.key k diff --git a/ql/src/test/results/clientpositive/union_ppr.q.out b/ql/src/test/results/clientpositive/union_ppr.q.out index d346010..99d2d3b 100644 --- a/ql/src/test/results/clientpositive/union_ppr.q.out +++ b/ql/src/test/results/clientpositive/union_ppr.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED 
+PREHOOK: query: EXPLAIN EXTENDED SELECT * FROM ( SELECT X.* FROM SRCPART X WHERE X.key < 100 UNION ALL @@ -9,9 +7,7 @@ SELECT * FROM ( WHERE A.ds = '2008-04-08' SORT BY A.key, A.value, A.ds, A.hr PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN EXTENDED +POSTHOOK: query: EXPLAIN EXTENDED SELECT * FROM ( SELECT X.* FROM SRCPART X WHERE X.key < 100 UNION ALL diff --git a/ql/src/test/results/clientpositive/union_remove_1.q.out b/ql/src/test/results/clientpositive/union_remove_1.q.out index 9b35b5f..2be8d57 100644 --- a/ql/src/test/results/clientpositive/union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/union_remove_1.q.out @@ -1,30 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_10.q.out b/ql/src/test/results/clientpositive/union_remove_10.q.out index e98a907..5bf4d51 100644 --- a/ql/src/test/results/clientpositive/union_remove_10.q.out +++ b/ql/src/test/results/clientpositive/union_remove_10.q.out @@ -1,38 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a nested union where one of the sub-queries requires a map-reduce --- job), followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The outer union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a nested union where one of the sub-queries requires a map-reduce --- job), followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The outer union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_11.q.out b/ql/src/test/results/clientpositive/union_remove_11.q.out index 9820267..89194d2 100644 --- a/ql/src/test/results/clientpositive/union_remove_11.q.out +++ b/ql/src/test/results/clientpositive/union_remove_11.q.out @@ -1,38 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a nested union where also contains map only sub-queries), --- followed by select star and a file sink. --- There is no need for the union optimization, since the whole query can be performed --- in a single map-only job --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a nested union where also contains map only sub-queries), --- followed by select star and a file sink. --- There is no need for the union optimization, since the whole query can be performed --- in a single map-only job --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_12.q.out b/ql/src/test/results/clientpositive/union_remove_12.q.out index 26887ea..650fb8d 100644 --- a/ql/src/test/results/clientpositive/union_remove_12.q.out +++ b/ql/src/test/results/clientpositive/union_remove_12.q.out @@ -1,36 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one is a map-join query), followed by select star and a file sink. --- The union optimization is applied, and the union is removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one is a map-join query), followed by select star and a file sink. 
--- The union optimization is applied, and the union is removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_13.q.out b/ql/src/test/results/clientpositive/union_remove_13.q.out index d013464..2c584e0 100644 --- a/ql/src/test/results/clientpositive/union_remove_13.q.out +++ b/ql/src/test/results/clientpositive/union_remove_13.q.out @@ -1,36 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a mapred query, and the --- other one is a map-join query), followed by select star and a file sink. --- The union selectstar optimization should be performed, and the union should be removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a mapred query, and the --- other one is a map-join query), followed by select star and a file sink. --- The union selectstar optimization should be performed, and the union should be removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_14.q.out b/ql/src/test/results/clientpositive/union_remove_14.q.out index 3a76b95..650fb8d 100644 --- a/ql/src/test/results/clientpositive/union_remove_14.q.out +++ b/ql/src/test/results/clientpositive/union_remove_14.q.out @@ -1,38 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a join, which should be performed as a map-join query at runtime), --- followed by select star and a file sink. --- The union selectstar optimization should be performed, and the union should be removed. 
- --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which is a map-only query, and the --- other one contains a join, which should be performed as a map-join query at runtime), --- followed by select star and a file sink. --- The union selectstar optimization should be performed, and the union should be removed. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- on - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_15.q.out b/ql/src/test/results/clientpositive/union_remove_15.q.out index 43a75af..8109837 100644 --- a/ql/src/test/results/clientpositive/union_remove_15.q.out +++ b/ql/src/test/results/clientpositive/union_remove_15.q.out @@ -1,36 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- This tests demonstrates that this optimization works in the presence of dynamic partitions. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. 
--- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- This tests demonstrates that this optimization works in the presence of dynamic partitions. - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_16.q.out b/ql/src/test/results/clientpositive/union_remove_16.q.out index 174a1a0..e8f3e47 100644 --- a/ql/src/test/results/clientpositive/union_remove_16.q.out +++ b/ql/src/test/results/clientpositive/union_remove_16.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on --- This test demonstrates that this optimization works in the presence of dynamic partitions. 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on --- This test demonstrates that this optimization works in the presence of dynamic partitions. 
--- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_17.q.out b/ql/src/test/results/clientpositive/union_remove_17.q.out index 5cde83e..e1c1692 100644 --- a/ql/src/test/results/clientpositive/union_remove_17.q.out +++ b/ql/src/test/results/clientpositive/union_remove_17.q.out @@ -1,30 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need for this optimization, since the query is a map-only query. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- and the results are written to a table using dynamic partitions. --- There is no need for this optimization, since the query is a map-only query. 
--- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_18.q.out b/ql/src/test/results/clientpositive/union_remove_18.q.out index d5b5a17..8c3ccb4 100644 --- a/ql/src/test/results/clientpositive/union_remove_18.q.out +++ b/ql/src/test/results/clientpositive/union_remove_18.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- This test demonstrates that the optimization works with dynamic partitions irrespective of the --- file format of the output file --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, ds string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, ds string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- This test demonstrates that the optimization works with dynamic partitions irrespective of the --- file format of the output file --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, ds string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, ds string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_19.q.out b/ql/src/test/results/clientpositive/union_remove_19.q.out index 20f7e0b..5918afd 100644 --- a/ql/src/test/results/clientpositive/union_remove_19.q.out +++ b/ql/src/test/results/clientpositive/union_remove_19.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- SORT_QUERY_RESULTS - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - --- SORT_QUERY_RESULTS - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 @@ -230,8 +204,7 @@ POSTHOOK: Input: default@outputtbl1 7 1 8 2 8 2 -PREHOOK: query: -- filter should be fine -explain +PREHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key, a.`values` FROM ( @@ -240,8 +213,7 @@ FROM ( SELECT key, count(1) as `values` from inputTbl1 group by key ) a where a.key = 7 PREHOOK: type: QUERY -POSTHOOK: query: -- filter should be fine -explain +POSTHOOK: query: explain insert overwrite table outputTbl1 SELECT a.key, a.`values` FROM ( @@ -380,8 +352,7 @@ POSTHOOK: Input: default@outputtbl1 #### A masked pattern was here #### 7 1 7 1 -PREHOOK: query: -- 
filters and sub-queries should be fine -explain +PREHOOK: query: explain insert overwrite table outputTbl1 select key, `values` from ( @@ -393,8 +364,7 @@ FROM ( ) a ) b where b.key >= 7 PREHOOK: type: QUERY -POSTHOOK: query: -- filters and sub-queries should be fine -explain +POSTHOOK: query: explain insert overwrite table outputTbl1 select key, `values` from ( diff --git a/ql/src/test/results/clientpositive/union_remove_2.q.out b/ql/src/test/results/clientpositive/union_remove_2.q.out index b889be5..8a841c0 100644 --- a/ql/src/test/results/clientpositive/union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/union_remove_2.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. 
--- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_20.q.out b/ql/src/test/results/clientpositive/union_remove_20.q.out index 746cbf3..9e78e22 100644 --- a/ql/src/test/results/clientpositive/union_remove_20.q.out +++ b/ql/src/test/results/clientpositive/union_remove_20.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, the order of the columns in the select list is different. So, union cannot --- be removed. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which changes the order of --- columns being selected) is pushed above the union. 
- -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, the order of the columns in the select list is different. So, union cannot --- be removed. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which changes the order of --- columns being selected) is pushed above the union. - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_21.q.out b/ql/src/test/results/clientpositive/union_remove_21.q.out index c143cc8..0c845df 100644 --- a/ql/src/test/results/clientpositive/union_remove_21.q.out +++ b/ql/src/test/results/clientpositive/union_remove_21.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, all the columns are not selected. So, union cannot --- be removed. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which changes the order of --- columns being selected) is pushed above the union. - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, all the columns are not selected. So, union cannot --- be removed. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which changes the order of --- columns being selected) is pushed above the union. 
- -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_22.q.out b/ql/src/test/results/clientpositive/union_remove_22.q.out index d269270..f1e0326 100644 --- a/ql/src/test/results/clientpositive/union_remove_22.q.out +++ b/ql/src/test/results/clientpositive/union_remove_22.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, some columns are repeated. So, union cannot be removed. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which selects columns from --- both the sub-qeuries of the union) is pushed above the union. - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select and a file sink --- However, some columns are repeated. So, union cannot be removed. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23. The union is removed, the select (which selects columns from --- both the sub-qeuries of the union) is pushed above the union. - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_23.q.out b/ql/src/test/results/clientpositive/union_remove_23.q.out index 585cdbb..5ccc085 100644 --- a/ql/src/test/results/clientpositive/union_remove_23.q.out +++ b/ql/src/test/results/clientpositive/union_remove_23.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. One of the sub-queries --- would have multiple map-reduce jobs. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. One of the sub-queries --- would have multiple map-reduce jobs. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_24.q.out b/ql/src/test/results/clientpositive/union_remove_24.q.out index 8e266fd..5cbb0b0 100644 --- a/ql/src/test/results/clientpositive/union_remove_24.q.out +++ b/ql/src/test/results/clientpositive/union_remove_24.q.out @@ -1,28 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- 
again to process the union. The union can be removed completely. --- One sub-query has a double and the other sub-query has a bigint. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- One sub-query has a double and the other sub-query has a bigint. --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_25.q.out b/ql/src/test/results/clientpositive/union_remove_25.q.out index 2785022..f967fc6 100644 --- a/ql/src/test/results/clientpositive/union_remove_25.q.out +++ b/ql/src/test/results/clientpositive/union_remove_25.q.out @@ -1,30 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- 
again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_3.q.out b/ql/src/test/results/clientpositive/union_remove_3.q.out index 5e117bd..5b43804 100644 --- a/ql/src/test/results/clientpositive/union_remove_3.q.out +++ b/ql/src/test/results/clientpositive/union_remove_3.q.out @@ -1,32 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->remove->filesink optimization --- Union of 3 subqueries is performed (all of which are map-only queries) --- followed by select star and a file sink. 
--- There is no need for any optimization, since the whole query can be processed in --- a single map-only job --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->remove->filesink optimization --- Union of 3 subqueries is performed (all of which are map-only queries) --- followed by select star and a file sink. --- There is no need for any optimization, since the whole query can be processed in --- a single map-only job --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_4.q.out b/ql/src/test/results/clientpositive/union_remove_4.q.out index 0004ed0..593bb6e 100644 --- a/ql/src/test/results/clientpositive/union_remove_4.q.out +++ b/ql/src/test/results/clientpositive/union_remove_4.q.out @@ -1,30 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. 
--- It does not matter, whether the output is merged or not. In this case, merging is turned --- on --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_5.q.out b/ql/src/test/results/clientpositive/union_remove_5.q.out index f88d1d2..aaf4654 100644 --- a/ql/src/test/results/clientpositive/union_remove_5.q.out +++ b/ql/src/test/results/clientpositive/union_remove_5.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. 
--- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_6.q.out b/ql/src/test/results/clientpositive/union_remove_6.q.out index 0a80e63..fee4614 100644 --- a/ql/src/test/results/clientpositive/union_remove_6.q.out +++ b/ql/src/test/results/clientpositive/union_remove_6.q.out @@ -1,24 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (all of which are mapred queries) --- followed by select star and a file sink in 2 output tables. --- The optimiaztion does not take affect since it is a multi-table insert. --- It does not matter, whether the output is merged or not. In this case, --- merging is turned off - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (all of which are mapred queries) --- followed by select star and a file sink in 2 output tables. --- The optimiaztion does not take affect since it is a multi-table insert. 
--- It does not matter, whether the output is merged or not. In this case, --- merging is turned off - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out index 6439a0f..5ab1873 100644 --- a/ql/src/test/results/clientpositive/union_remove_6_subq.q.out +++ b/ql/src/test/results/clientpositive/union_remove_6_subq.q.out @@ -1,26 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (all of which are mapred queries) --- followed by select star and a file sink in 2 output tables. --- The optimiaztion does not take affect since it is a multi-table insert. --- It does not matter, whether the output is merged or not. In this case, --- merging is turned off - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (all of which are mapred queries) --- followed by select star and a file sink in 2 output tables. --- The optimiaztion does not take affect since it is a multi-table insert. --- It does not matter, whether the output is merged or not. 
In this case, --- merging is turned off - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 @@ -278,16 +260,14 @@ POSTHOOK: Input: default@outputtbl2 7 1 8 2 8 2 -PREHOOK: query: -- The following queries guarantee the correctness. -explain +PREHOOK: query: explain select avg(c) from( SELECT count(1)-200 as c from src UNION ALL SELECT count(1) as c from src )subq PREHOOK: type: QUERY -POSTHOOK: query: -- The following queries guarantee the correctness. -explain +POSTHOOK: query: explain select avg(c) from( SELECT count(1)-200 as c from src UNION ALL diff --git a/ql/src/test/results/clientpositive/union_remove_7.q.out b/ql/src/test/results/clientpositive/union_remove_7.q.out index 0a2dc62..5bdd4ba 100644 --- a/ql/src/test/results/clientpositive/union_remove_7.q.out +++ b/ql/src/test/results/clientpositive/union_remove_7.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 map-reduce subqueries is performed followed by select star and a file sink --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_8.q.out b/ql/src/test/results/clientpositive/union_remove_8.q.out index 3003b3c..aedec8b 100644 --- a/ql/src/test/results/clientpositive/union_remove_8.q.out +++ b/ql/src/test/results/clientpositive/union_remove_8.q.out @@ -1,36 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. 
--- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- off - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 3 subqueries is performed (exactly one of which requires a map-reduce job) --- followed by select star and a file sink. --- There is no need to write the temporary results of the sub-queries, and then read them --- again to process the union. The union can be removed completely. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. 
In this case, merging is turned --- off - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_remove_9.q.out b/ql/src/test/results/clientpositive/union_remove_9.q.out index deb2a34..af7c2e4 100644 --- a/ql/src/test/results/clientpositive/union_remove_9.q.out +++ b/ql/src/test/results/clientpositive/union_remove_9.q.out @@ -1,34 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which contains a union and is map-only), --- and the other one is a map-reduce query followed by select star and a file sink. --- There is no need for the outer union. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +PREHOOK: query: create table inputTbl1(key string, val string) stored as textfile PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@inputTbl1 -POSTHOOK: query: -- SORT_QUERY_RESULTS --- This is to test the union->selectstar->filesink optimization --- Union of 2 subqueries is performed (one of which contains a union and is map-only), --- and the other one is a map-reduce query followed by select star and a file sink. 
--- There is no need for the outer union. --- The final file format is different from the input and intermediate file format. --- It does not matter, whether the output is merged or not. In this case, merging is turned --- on - --- INCLUDE_HADOOP_MAJOR_VERSIONS(0.23) --- Since this test creates sub-directories for the output table outputTbl1, it might be easier --- to run the test only on hadoop 23 - -create table inputTbl1(key string, val string) stored as textfile +POSTHOOK: query: create table inputTbl1(key string, val string) stored as textfile POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@inputTbl1 diff --git a/ql/src/test/results/clientpositive/union_script.q.out b/ql/src/test/results/clientpositive/union_script.q.out index 44ea01b..b8dfeb1 100644 --- a/ql/src/test/results/clientpositive/union_script.q.out +++ b/ql/src/test/results/clientpositive/union_script.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS -select * from ( +PREHOOK: query: select * from ( select transform(key) using 'cat' as cola from src)s PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS -select * from ( +POSTHOOK: query: select * from ( select transform(key) using 'cat' as cola from src)s POSTHOOK: type: QUERY POSTHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out index 0a11e23..0e8b897 100644 --- a/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out +++ b/ql/src/test/results/clientpositive/unionall_unbalancedppd.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table if exists union_all_bug_test_1 +PREHOOK: query: drop table if exists union_all_bug_test_1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table if exists union_all_bug_test_1 +POSTHOOK: query: drop table if exists 
union_all_bug_test_1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table if exists union_all_bug_test_2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/unset_table_view_property.q.out b/ql/src/test/results/clientpositive/unset_table_view_property.q.out index a3dec73..f306857 100644 --- a/ql/src/test/results/clientpositive/unset_table_view_property.q.out +++ b/ql/src/test/results/clientpositive/unset_table_view_property.q.out @@ -22,13 +22,11 @@ numRows 0 rawDataSize 0 totalSize 0 #### A masked pattern was here #### -PREHOOK: query: -- UNSET TABLE PROPERTIES -ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'c'='3') +PREHOOK: query: ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'c'='3') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: vt@testtable PREHOOK: Output: vt@testtable -POSTHOOK: query: -- UNSET TABLE PROPERTIES -ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'c'='3') +POSTHOOK: query: ALTER TABLE vt.testTable SET TBLPROPERTIES ('a'='1', 'c'='3') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: vt@testtable POSTHOOK: Output: vt@testtable @@ -44,13 +42,11 @@ numRows 0 rawDataSize 0 totalSize 0 #### A masked pattern was here #### -PREHOOK: query: -- UNSET all the properties -ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'c') +PREHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'c') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: vt@testtable PREHOOK: Output: vt@testtable -POSTHOOK: query: -- UNSET all the properties -ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'c') +POSTHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'c') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: vt@testtable POSTHOOK: Output: vt@testtable @@ -85,13 +81,11 @@ numRows 0 rawDataSize 0 totalSize 0 #### A masked pattern was here #### -PREHOOK: query: -- UNSET a subset of the properties -ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'd') +PREHOOK: query: ALTER TABLE vt.testTable 
UNSET TBLPROPERTIES ('a', 'd') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: vt@testtable PREHOOK: Output: vt@testtable -POSTHOOK: query: -- UNSET a subset of the properties -ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'd') +POSTHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('a', 'd') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: vt@testtable POSTHOOK: Output: vt@testtable @@ -106,13 +100,11 @@ numRows 0 rawDataSize 0 totalSize 0 #### A masked pattern was here #### -PREHOOK: query: -- the same property being UNSET multiple times -ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('c', 'c', 'c') +PREHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('c', 'c', 'c') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: vt@testtable PREHOOK: Output: vt@testtable -POSTHOOK: query: -- the same property being UNSET multiple times -ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('c', 'c', 'c') +POSTHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES ('c', 'c', 'c') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: vt@testtable POSTHOOK: Output: vt@testtable @@ -148,13 +140,11 @@ numRows 0 rawDataSize 0 totalSize 0 #### A masked pattern was here #### -PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f') +PREHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: vt@testtable PREHOOK: Output: vt@testtable -POSTHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f') +POSTHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'b', 'f') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: vt@testtable POSTHOOK: Output: vt@testtable @@ -170,13 +160,11 @@ numRows 0 rawDataSize 0 
totalSize 0 #### A masked pattern was here #### -PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z') +PREHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z') PREHOOK: type: ALTERTABLE_PROPERTIES PREHOOK: Input: vt@testtable PREHOOK: Output: vt@testtable -POSTHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z') +POSTHOOK: query: ALTER TABLE vt.testTable UNSET TBLPROPERTIES IF EXISTS ('b', 'd', 'c', 'f', 'x', 'y', 'z') POSTHOOK: type: ALTERTABLE_PROPERTIES POSTHOOK: Input: vt@testtable POSTHOOK: Output: vt@testtable @@ -199,14 +187,12 @@ POSTHOOK: query: DROP TABLE vt.testTable POSTHOOK: type: DROPTABLE POSTHOOK: Input: vt@testtable POSTHOOK: Output: vt@testtable -PREHOOK: query: -- UNSET VIEW PROPERTIES -CREATE VIEW vt.testView AS SELECT value FROM src WHERE key=86 +PREHOOK: query: CREATE VIEW vt.testView AS SELECT value FROM src WHERE key=86 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@src PREHOOK: Output: database:vt PREHOOK: Output: vt@testView -POSTHOOK: query: -- UNSET VIEW PROPERTIES -CREATE VIEW vt.testView AS SELECT value FROM src WHERE key=86 +POSTHOOK: query: CREATE VIEW vt.testView AS SELECT value FROM src WHERE key=86 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@src POSTHOOK: Output: database:vt @@ -227,13 +213,11 @@ POSTHOOK: type: SHOW_TBLPROPERTIES propA 100 propB 200 #### A masked pattern was here #### -PREHOOK: query: -- UNSET all the properties -ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propB') +PREHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propB') PREHOOK: type: ALTERVIEW_PROPERTIES PREHOOK: Input: vt@testview PREHOOK: Output: vt@testview -POSTHOOK: query: -- UNSET all the 
properties -ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propB') +POSTHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propB') POSTHOOK: type: ALTERVIEW_PROPERTIES POSTHOOK: Input: vt@testview POSTHOOK: Output: vt@testview @@ -259,13 +243,11 @@ propA 100 propC 300 propD 400 #### A masked pattern was here #### -PREHOOK: query: -- UNSET a subset of the properties -ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propC') +PREHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propC') PREHOOK: type: ALTERVIEW_PROPERTIES PREHOOK: Input: vt@testview PREHOOK: Output: vt@testview -POSTHOOK: query: -- UNSET a subset of the properties -ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propC') +POSTHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propA', 'propC') POSTHOOK: type: ALTERVIEW_PROPERTIES POSTHOOK: Input: vt@testview POSTHOOK: Output: vt@testview @@ -276,13 +258,11 @@ POSTHOOK: type: SHOW_TBLPROPERTIES #### A masked pattern was here #### propD 400 #### A masked pattern was here #### -PREHOOK: query: -- the same property being UNSET multiple times -ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD') +PREHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD') PREHOOK: type: ALTERVIEW_PROPERTIES PREHOOK: Input: vt@testview PREHOOK: Output: vt@testview -POSTHOOK: query: -- the same property being UNSET multiple times -ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD') +POSTHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES ('propD', 'propD', 'propD') POSTHOOK: type: ALTERVIEW_PROPERTIES POSTHOOK: Input: vt@testview POSTHOOK: Output: vt@testview @@ -309,13 +289,11 @@ propB 200 propC 300 propD 400 #### A masked pattern was here #### -PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ') +PREHOOK: 
query: ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ') PREHOOK: type: ALTERVIEW_PROPERTIES PREHOOK: Input: vt@testview PREHOOK: Output: vt@testview -POSTHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ') +POSTHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propC', 'propD', 'propD', 'propC', 'propZ') POSTHOOK: type: ALTERVIEW_PROPERTIES POSTHOOK: Input: vt@testview POSTHOOK: Output: vt@testview @@ -327,13 +305,11 @@ POSTHOOK: type: SHOW_TBLPROPERTIES propA 100 propB 200 #### A masked pattern was here #### -PREHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propB', 'propC', 'propD', 'propF') +PREHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propB', 'propC', 'propD', 'propF') PREHOOK: type: ALTERVIEW_PROPERTIES PREHOOK: Input: vt@testview PREHOOK: Output: vt@testview -POSTHOOK: query: -- UNSET a subset of the properties and some non-existed properties using IF EXISTS -ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propB', 'propC', 'propD', 'propF') +POSTHOOK: query: ALTER VIEW vt.testView UNSET TBLPROPERTIES IF EXISTS ('propB', 'propC', 'propD', 'propF') POSTHOOK: type: ALTERVIEW_PROPERTIES POSTHOOK: Input: vt@testview POSTHOOK: Output: vt@testview diff --git a/ql/src/test/results/clientpositive/varchar_cast.q.out b/ql/src/test/results/clientpositive/varchar_cast.q.out index 5a968f2..c2cbe78 100644 --- a/ql/src/test/results/clientpositive/varchar_cast.q.out +++ b/ql/src/test/results/clientpositive/varchar_cast.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Cast from varchar to other data types -select +PREHOOK: query: select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), cast(cast('11' as string) 
as int), @@ -11,8 +10,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Cast from varchar to other data types -select +POSTHOOK: query: select cast(cast('11' as string) as tinyint), cast(cast('11' as string) as smallint), cast(cast('11' as string) as int), @@ -80,16 +78,14 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 2011-01-01 2011-01-01 01:02:03 -PREHOOK: query: -- no tests from string/varchar to boolean, that conversion doesn't look useful -select +PREHOOK: query: select cast(cast('abc123' as string) as string), cast(cast('abc123' as string) as varchar(10)) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- no tests from string/varchar to boolean, that conversion doesn't look useful -select +POSTHOOK: query: select cast(cast('abc123' as string) as string), cast(cast('abc123' as string) as varchar(10)) from src limit 1 @@ -112,8 +108,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### abc123 abc123 -PREHOOK: query: -- cast from other types to varchar -select +PREHOOK: query: select cast(cast(11 as tinyint) as string), cast(cast(11 as smallint) as string), cast(cast(11 as int) as string), @@ -125,8 +120,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- cast from other types to varchar -select +POSTHOOK: query: select cast(cast(11 as tinyint) as string), cast(cast(11 as smallint) as string), cast(cast(11 as int) as string), diff --git a/ql/src/test/results/clientpositive/varchar_comparison.q.out b/ql/src/test/results/clientpositive/varchar_comparison.q.out index e2c7aaf..236c4b1 100644 --- a/ql/src/test/results/clientpositive/varchar_comparison.q.out +++ b/ql/src/test/results/clientpositive/varchar_comparison.q.out @@ -1,5 +1,4 @@ -PREHOOK: query: -- Should 
all be true -select +PREHOOK: query: select cast('abc' as varchar(10)) = cast('abc' as varchar(10)), cast('abc' as varchar(10)) <= cast('abc' as varchar(10)), cast('abc' as varchar(10)) >= cast('abc' as varchar(10)), @@ -10,8 +9,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Should all be true -select +POSTHOOK: query: select cast('abc' as varchar(10)) = cast('abc' as varchar(10)), cast('abc' as varchar(10)) <= cast('abc' as varchar(10)), cast('abc' as varchar(10)) >= cast('abc' as varchar(10)), @@ -23,8 +21,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true true true true true true -PREHOOK: query: -- Different varchar lengths should still compare the same -select +PREHOOK: query: select cast('abc' as varchar(10)) = cast('abc' as varchar(3)), cast('abc' as varchar(10)) <= cast('abc' as varchar(3)), cast('abc' as varchar(10)) >= cast('abc' as varchar(3)), @@ -35,8 +32,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Different varchar lengths should still compare the same -select +POSTHOOK: query: select cast('abc' as varchar(10)) = cast('abc' as varchar(3)), cast('abc' as varchar(10)) <= cast('abc' as varchar(3)), cast('abc' as varchar(10)) >= cast('abc' as varchar(3)), @@ -48,8 +44,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true true true true true true -PREHOOK: query: -- Should work with string types as well -select +PREHOOK: query: select cast('abc' as varchar(10)) = 'abc', cast('abc' as varchar(10)) <= 'abc', cast('abc' as varchar(10)) >= 'abc', @@ -60,8 +55,7 @@ from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- Should work with string types as well -select +POSTHOOK: query: select cast('abc' as varchar(10)) = 'abc', cast('abc' as 
varchar(10)) <= 'abc', cast('abc' as varchar(10)) >= 'abc', @@ -73,30 +67,26 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true true true true true true -PREHOOK: query: -- leading space is significant for varchar -select +PREHOOK: query: select cast(' abc' as varchar(10)) <> cast('abc' as varchar(10)) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- leading space is significant for varchar -select +POSTHOOK: query: select cast(' abc' as varchar(10)) <> cast('abc' as varchar(10)) from src limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### true -PREHOOK: query: -- trailing space is significant for varchar -select +PREHOOK: query: select cast('abc ' as varchar(10)) <> cast('abc' as varchar(10)) from src limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: -- trailing space is significant for varchar -select +POSTHOOK: query: select cast('abc ' as varchar(10)) <> cast('abc' as varchar(10)) from src limit 1 POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/vector_between_columns.q.out b/ql/src/test/results/clientpositive/vector_between_columns.q.out index a56f2d3..c2da514 100644 --- a/ql/src/test/results/clientpositive/vector_between_columns.q.out +++ b/ql/src/test/results/clientpositive/vector_between_columns.q.out @@ -1,17 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS --- --- Verify the VectorUDFAdaptor to GenericUDFBetween works for PROJECTION and FILTER. 
--- -create table if not exists TSINT_txt ( RNUM int , CSINT smallint ) +PREHOOK: query: create table if not exists TSINT_txt ( RNUM int , CSINT smallint ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@TSINT_txt -POSTHOOK: query: -- SORT_QUERY_RESULTS --- --- Verify the VectorUDFAdaptor to GenericUDFBetween works for PROJECTION and FILTER. --- -create table if not exists TSINT_txt ( RNUM int , CSINT smallint ) +POSTHOOK: query: create table if not exists TSINT_txt ( RNUM int , CSINT smallint ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out index c69b7af..6f01b52 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out @@ -145,16 +145,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) +PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@t2 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) +POSTHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 diff --git a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out index 701e0ce..0a9ff22 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out @@ -12,9 +12,7 @@ 
POSTHOOK: Lineage: decimal_test.cbigint SIMPLE [(alltypesorc)alltypesorc.FieldSc POSTHOOK: Lineage: decimal_test.cdecimal1 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] POSTHOOK: Lineage: decimal_test.cdecimal2 EXPRESSION [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] POSTHOOK: Lineage: decimal_test.cdouble SIMPLE [(alltypesorc)alltypesorc.FieldSchema(name:cdouble, type:double, comment:null), ] -PREHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. - -explain +PREHOOK: query: explain select cdecimal1 ,Round(cdecimal1, 2) @@ -46,14 +44,12 @@ select -- Test nesting ,cos(-sin(log(cdecimal1)) + 3.14159) from decimal_test --- limit output to a reasonably small number of rows + where cbigint % 500 = 0 --- test use of a math function in the WHERE clause + and sin(cdecimal1) >= -1.0 PREHOOK: type: QUERY -POSTHOOK: query: -- Test math functions in vectorized mode to verify they run correctly end-to-end. 
- -explain +POSTHOOK: query: explain select cdecimal1 ,Round(cdecimal1, 2) @@ -85,9 +81,9 @@ select -- Test nesting ,cos(-sin(log(cdecimal1)) + 3.14159) from decimal_test --- limit output to a reasonably small number of rows + where cbigint % 500 = 0 --- test use of a math function in the WHERE clause + and sin(cdecimal1) >= -1.0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -153,9 +149,9 @@ PREHOOK: query: select -- Test nesting ,cos(-sin(log(cdecimal1)) + 3.14159) from decimal_test --- limit output to a reasonably small number of rows + where cbigint % 500 = 0 --- test use of a math function in the WHERE clause + and sin(cdecimal1) >= -1.0 PREHOOK: type: QUERY PREHOOK: Input: default@decimal_test @@ -191,9 +187,9 @@ POSTHOOK: query: select -- Test nesting ,cos(-sin(log(cdecimal1)) + 3.14159) from decimal_test --- limit output to a reasonably small number of rows + where cbigint % 500 = 0 --- test use of a math function in the WHERE clause + and sin(cdecimal1) >= -1.0 POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_test diff --git a/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out index 67725bd..a1257ce 100644 --- a/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out +++ b/ql/src/test/results/clientpositive/vector_left_outer_join2.q.out @@ -1,10 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -drop table if exists TJOIN1 +PREHOOK: query: drop table if exists TJOIN1 PREHOOK: type: DROPTABLE -POSTHOOK: query: -- SORT_QUERY_RESULTS - -drop table if exists TJOIN1 +POSTHOOK: query: drop table if exists TJOIN1 POSTHOOK: type: DROPTABLE PREHOOK: query: drop table if exists TJOIN2 PREHOOK: type: DROPTABLE diff --git a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out index b2b9a3b..1dec224 100644 --- a/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out +++ 
b/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out @@ -1,22 +1,10 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. --- Query copied from subquery_in.q - --- non agg, non corr, with join in Parent Query -explain +PREHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR') PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- Verify HIVE-8097 with a query that has a Vectorized MapJoin in the Reducer. --- Query copied from subquery_in.q - --- non agg, non corr, with join in Parent Query -explain +POSTHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and @@ -298,15 +286,13 @@ POSTHOOK: Input: default@lineitem 61336 8855 64128 9141 82704 7721 -PREHOOK: query: -- non agg, corr, with join in Parent Query -explain +PREHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and li.l_orderkey in (select l_orderkey from lineitem where l_shipmode = 'AIR' and l_linenumber = li.l_linenumber) PREHOOK: type: QUERY -POSTHOOK: query: -- non agg, corr, with join in Parent Query -explain +POSTHOOK: query: explain select p.p_partkey, li.l_suppkey from (select distinct l_partkey as p_partkey from lineitem) p join lineitem li on p.p_partkey = li.l_partkey where li.l_linenumber = 1 and diff --git a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out index 8c1acc8..e5a62a3 100644 --- 
a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out +++ b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out @@ -208,10 +208,7 @@ stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@store -PREHOOK: query: -- For MR, we are verifying this query DOES NOT vectorize the Map vertex with --- the 2 TableScanOperators that have different schema. - -explain select +PREHOOK: query: explain select s_state, count(1) from store_sales, store, @@ -223,10 +220,7 @@ explain select order by s_state limit 100 PREHOOK: type: QUERY -POSTHOOK: query: -- For MR, we are verifying this query DOES NOT vectorize the Map vertex with --- the 2 TableScanOperators that have different schema. - -explain select +POSTHOOK: query: explain select s_state, count(1) from store_sales, store, diff --git a/ql/src/test/results/clientpositive/vector_null_projection.q.out b/ql/src/test/results/clientpositive/vector_null_projection.q.out index d4ff550..aa923a6 100644 --- a/ql/src/test/results/clientpositive/vector_null_projection.q.out +++ b/ql/src/test/results/clientpositive/vector_null_projection.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table a(s string) stored as orc +PREHOOK: query: create table a(s string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@a -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table a(s string) stored as orc +POSTHOOK: query: create table a(s string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@a @@ -32,12 +28,10 @@ POSTHOOK: query: insert into table b values('aaa') POSTHOOK: type: QUERY POSTHOOK: Output: default@b POSTHOOK: Lineage: b.s SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ] -PREHOOK: query: -- We expect no vectorization due to NULL (void) projection type. 
-explain +PREHOOK: query: explain select NULL from a PREHOOK: type: QUERY -POSTHOOK: query: -- We expect no vectorization due to NULL (void) projection type. -explain +POSTHOOK: query: explain select NULL from a POSTHOOK: type: QUERY STAGE DEPENDENCIES: diff --git a/ql/src/test/results/clientpositive/vector_outer_join0.q.out b/ql/src/test/results/clientpositive/vector_outer_join0.q.out index 4d92e0c..a8765cd 100644 --- a/ql/src/test/results/clientpositive/vector_outer_join0.q.out +++ b/ql/src/test/results/clientpositive/vector_outer_join0.q.out @@ -125,16 +125,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c PREHOOK: type: QUERY PREHOOK: Input: default@orc_table_1 PREHOOK: Input: default@orc_table_2 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 left outer join orc_table_2 t2 on t1.a = t2.c POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_table_1 POSTHOOK: Input: default@orc_table_2 @@ -212,16 +208,12 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +PREHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c PREHOOK: type: QUERY PREHOOK: Input: default@orc_table_1 PREHOOK: Input: default@orc_table_2 #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select t1.v1, t1.a, t2.c, t2.v2 from orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c +POSTHOOK: query: select t1.v1, t1.a, t2.c, t2.v2 from 
orc_table_1 t1 right outer join orc_table_2 t2 on t1.a = t2.c POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_table_1 POSTHOOK: Input: default@orc_table_2 diff --git a/ql/src/test/results/clientpositive/vector_outer_join1.q.out b/ql/src/test/results/clientpositive/vector_outer_join1.q.out index c015578..32c335d 100644 --- a/ql/src/test/results/clientpositive/vector_outer_join1.q.out +++ b/ql/src/test/results/clientpositive/vector_outer_join1.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Using cint and ctinyint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@small_alltypesorc1a -POSTHOOK: query: -- Using cint and ctinyint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default @@ -289,18 +287,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select * 
+PREHOOK: query: select * from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select * +POSTHOOK: query: select * from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -399,18 +393,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select c.ctinyint +PREHOOK: query: select c.ctinyint from small_alltypesorc_a c left outer join small_alltypesorc_a hd on hd.ctinyint = c.ctinyint PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select c.ctinyint +POSTHOOK: query: select c.ctinyint from small_alltypesorc_a c left outer join small_alltypesorc_a hd on hd.ctinyint = c.ctinyint @@ -639,9 +629,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +PREHOOK: query: select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -651,9 +639,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint +POSTHOOK: query: select count(*), sum(t1.c_ctinyint) from (select c.ctinyint as c_ctinyint from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint diff --git a/ql/src/test/results/clientpositive/vector_outer_join2.q.out b/ql/src/test/results/clientpositive/vector_outer_join2.q.out index 98c65d4..07d6107 100644 --- a/ql/src/test/results/clientpositive/vector_outer_join2.q.out +++ 
b/ql/src/test/results/clientpositive/vector_outer_join2.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Using cint and cbigint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@small_alltypesorc1a -POSTHOOK: query: -- Using cint and cbigint in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cbigint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default @@ -343,9 +341,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +PREHOOK: query: select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -355,9 +351,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: 
query: -- SORT_QUERY_RESULTS - -select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint +POSTHOOK: query: select count(*), sum(t1.c_cbigint) from (select c.cbigint as c_cbigint from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint diff --git a/ql/src/test/results/clientpositive/vector_outer_join3.q.out b/ql/src/test/results/clientpositive/vector_outer_join3.q.out index a6d5c59..52271c7 100644 --- a/ql/src/test/results/clientpositive/vector_outer_join3.q.out +++ b/ql/src/test/results/clientpositive/vector_outer_join3.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Using cint and cstring1 in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +PREHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@small_alltypesorc1a -POSTHOOK: query: -- Using cint and cstring1 in test queries -create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 +POSTHOOK: query: create table small_alltypesorc1a as select * from alltypesorc where cint is not null and cstring1 is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 5 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default 
@@ -342,9 +340,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +PREHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -354,9 +350,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +POSTHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cint = c.cint @@ -483,9 +477,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +PREHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cstring2 = c.cstring2 @@ -495,9 +487,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +POSTHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cstring2 = c.cstring2 @@ -624,9 +614,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +PREHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a c left outer join small_alltypesorc_a cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint @@ -636,9 +624,7 @@ left outer join small_alltypesorc_a hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_a #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.cstring1 +POSTHOOK: query: select count(*) from (select c.cstring1 from small_alltypesorc_a 
c left outer join small_alltypesorc_a cd on cd.cstring2 = c.cstring2 and cd.cbigint = c.cbigint diff --git a/ql/src/test/results/clientpositive/vector_outer_join4.q.out b/ql/src/test/results/clientpositive/vector_outer_join4.q.out index b6def86..610f7a7 100644 --- a/ql/src/test/results/clientpositive/vector_outer_join4.q.out +++ b/ql/src/test/results/clientpositive/vector_outer_join4.q.out @@ -1,11 +1,9 @@ -PREHOOK: query: -- Using cint and ctinyint in test queries -create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10 +PREHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10 PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@alltypesorc PREHOOK: Output: database:default PREHOOK: Output: default@small_alltypesorc1b -POSTHOOK: query: -- Using cint and ctinyint in test queries -create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10 +POSTHOOK: query: create table small_alltypesorc1b as select * from alltypesorc where cint is not null and ctinyint is not null order by ctinyint, csmallint, cint, cbigint, cfloat, cdouble, cstring1, cstring2, ctimestamp1, ctimestamp2, cboolean1, cboolean2 limit 10 POSTHOOK: type: CREATETABLE_AS_SELECT POSTHOOK: Input: default@alltypesorc POSTHOOK: Output: database:default @@ -319,18 +317,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select * +PREHOOK: query: select * from small_alltypesorc_b c left outer 
join small_alltypesorc_b cd on cd.cint = c.cint PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_b #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select * +POSTHOOK: query: select * from small_alltypesorc_b c left outer join small_alltypesorc_b cd on cd.cint = c.cint @@ -464,18 +458,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select c.ctinyint +PREHOOK: query: select c.ctinyint from small_alltypesorc_b c left outer join small_alltypesorc_b hd on hd.ctinyint = c.ctinyint PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_b #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select c.ctinyint +POSTHOOK: query: select c.ctinyint from small_alltypesorc_b c left outer join small_alltypesorc_b hd on hd.ctinyint = c.ctinyint @@ -1008,9 +998,7 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.ctinyint +PREHOOK: query: select count(*) from (select c.ctinyint from small_alltypesorc_b c left outer join small_alltypesorc_b cd on cd.cint = c.cint @@ -1020,9 +1008,7 @@ left outer join small_alltypesorc_b hd PREHOOK: type: QUERY PREHOOK: Input: default@small_alltypesorc_b #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -select count(*) from (select c.ctinyint +POSTHOOK: query: select count(*) from (select c.ctinyint from small_alltypesorc_b c left outer join small_alltypesorc_b cd on cd.cint = c.cint diff --git a/ql/src/test/results/clientpositive/vector_outer_join6.q.out b/ql/src/test/results/clientpositive/vector_outer_join6.q.out index 8c09716..7bcb1a9 100644 --- a/ql/src/test/results/clientpositive/vector_outer_join6.q.out +++ b/ql/src/test/results/clientpositive/vector_outer_join6.q.out @@ -1,13 +1,9 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table TJOIN1_txt (RNUM int , C1 int, C2 int) +PREHOOK: query: create table TJOIN1_txt (RNUM int , C1 int, C2 
int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@TJOIN1_txt -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table TJOIN1_txt (RNUM int , C1 int, C2 int) +POSTHOOK: query: create table TJOIN1_txt (RNUM int , C1 int, C2 int) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' LINES TERMINATED BY '\n' POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default diff --git a/ql/src/test/results/clientpositive/vector_reduce1.q.out b/ql/src/test/results/clientpositive/vector_reduce1.q.out index fe69ebd..f22ac16 100644 --- a/ql/src/test/results/clientpositive/vector_reduce1.q.out +++ b/ql/src/test/results/clientpositive/vector_reduce1.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +PREHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, @@ -19,9 +17,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@vectortab2k -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +POSTHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, diff --git a/ql/src/test/results/clientpositive/vector_reduce2.q.out b/ql/src/test/results/clientpositive/vector_reduce2.q.out index 22d95ad..8f5b618 100644 --- a/ql/src/test/results/clientpositive/vector_reduce2.q.out +++ b/ql/src/test/results/clientpositive/vector_reduce2.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +PREHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, @@ -19,9 +17,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@vectortab2k -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +POSTHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, diff --git a/ql/src/test/results/clientpositive/vector_reduce3.q.out 
b/ql/src/test/results/clientpositive/vector_reduce3.q.out index c7dffd8..f4220e3 100644 --- a/ql/src/test/results/clientpositive/vector_reduce3.q.out +++ b/ql/src/test/results/clientpositive/vector_reduce3.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +PREHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, @@ -19,9 +17,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@vectortab2k -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table vectortab2k( +POSTHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, diff --git a/ql/src/test/results/clientpositive/vector_string_concat.q.out b/ql/src/test/results/clientpositive/vector_string_concat.q.out index 39a64d8..30ee10f 100644 --- a/ql/src/test/results/clientpositive/vector_string_concat.q.out +++ b/ql/src/test/results/clientpositive/vector_string_concat.q.out @@ -6,8 +6,7 @@ PREHOOK: query: DROP TABLE over1korc PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE over1korc POSTHOOK: type: DROPTABLE -PREHOOK: query: -- data setup -CREATE TABLE over1k(t tinyint, +PREHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, @@ -23,8 +22,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@over1k -POSTHOOK: query: -- data setup -CREATE TABLE over1k(t tinyint, +POSTHOOK: query: CREATE TABLE over1k(t tinyint, si smallint, i int, b bigint, @@ -174,9 +172,7 @@ sarah garcia sarah garcia | sarah garcia| zach young zach young | zach young| david underhill david underhill | david underhill| yuri carson yuri carson | yuri carson| -PREHOOK: query: ------------------------------------------------------------------------------------------ - -create table vectortab2k( +PREHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, @@ -195,9 +191,7 @@ STORED AS TEXTFILE PREHOOK: type: CREATETABLE PREHOOK: Output: 
database:default PREHOOK: Output: default@vectortab2k -POSTHOOK: query: ------------------------------------------------------------------------------------------ - -create table vectortab2k( +POSTHOOK: query: create table vectortab2k( t tinyint, si smallint, i int, diff --git a/ql/src/test/results/clientpositive/vector_struct_in.q.out b/ql/src/test/results/clientpositive/vector_struct_in.q.out index 346d531..c1ce773 100644 --- a/ql/src/test/results/clientpositive/vector_struct_in.q.out +++ b/ql/src/test/results/clientpositive/vector_struct_in.q.out @@ -1,14 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- 2 Strings -create table test_1 (`id` string, `lineid` string) stored as orc +PREHOOK: query: create table test_1 (`id` string, `lineid` string) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_1 -POSTHOOK: query: -- SORT_QUERY_RESULTS - --- 2 Strings -create table test_1 (`id` string, `lineid` string) stored as orc +POSTHOOK: query: create table test_1 (`id` string, `lineid` string) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_1 @@ -203,13 +197,11 @@ POSTHOOK: Input: default@test_1 #### A masked pattern was here #### one 1 true seven 1 true -PREHOOK: query: -- 2 Integers -create table test_2 (`id` int, `lineid` int) stored as orc +PREHOOK: query: create table test_2 (`id` int, `lineid` int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_2 -POSTHOOK: query: -- 2 Integers -create table test_2 (`id` int, `lineid` int) stored as orc +POSTHOOK: query: create table test_2 (`id` int, `lineid` int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_2 @@ -404,13 +396,11 @@ POSTHOOK: Input: default@test_2 #### A masked pattern was here #### 1 1 true 7 1 true -PREHOOK: query: -- 1 String and 1 Integer -create table test_3 (`id` string, 
`lineid` int) stored as orc +PREHOOK: query: create table test_3 (`id` string, `lineid` int) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_3 -POSTHOOK: query: -- 1 String and 1 Integer -create table test_3 (`id` string, `lineid` int) stored as orc +POSTHOOK: query: create table test_3 (`id` string, `lineid` int) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_3 @@ -605,13 +595,11 @@ POSTHOOK: Input: default@test_3 #### A masked pattern was here #### one 1 true seven 1 true -PREHOOK: query: -- 1 Integer and 1 String and 1 Double -create table test_4 (`my_bigint` bigint, `my_string` string, `my_double` double) stored as orc +PREHOOK: query: create table test_4 (`my_bigint` bigint, `my_string` string, `my_double` double) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@test_4 -POSTHOOK: query: -- 1 Integer and 1 String and 1 Double -create table test_4 (`my_bigint` bigint, `my_string` string, `my_double` double) stored as orc +POSTHOOK: query: create table test_4 (`my_bigint` bigint, `my_string` string, `my_double` double) stored as orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@test_4 diff --git a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out index 7e5f24d..d1a7de6 100644 --- a/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out +++ b/ql/src/test/results/clientpositive/vector_tablesample_rows.q.out @@ -167,12 +167,10 @@ POSTHOOK: query: drop table decimal_2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@decimal_2 POSTHOOK: Output: default@decimal_2 -PREHOOK: query: -- Dummy tables HIVE-13190 -explain +PREHOOK: query: explain select count(1) from (select * from (Select 1 a) x order by x.a) y PREHOOK: type: QUERY -POSTHOOK: query: -- Dummy tables 
HIVE-13190 -explain +POSTHOOK: query: explain select count(1) from (select * from (Select 1 a) x order by x.a) y POSTHOOK: type: QUERY Explain diff --git a/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out b/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out index a6c4de9..c96d04d 100644 --- a/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out +++ b/ql/src/test/results/clientpositive/vector_varchar_mapjoin1.q.out @@ -124,11 +124,9 @@ POSTHOOK: Output: database:default POSTHOOK: Output: default@varchar_join1_str_orc POSTHOOK: Lineage: varchar_join1_str_orc.c1 SIMPLE [(varchar_join1_str)varchar_join1_str.FieldSchema(name:c1, type:int, comment:null), ] POSTHOOK: Lineage: varchar_join1_str_orc.c2 SIMPLE [(varchar_join1_str)varchar_join1_str.FieldSchema(name:c2, type:string, comment:null), ] -PREHOOK: query: -- Join varchar with same length varchar -explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY -POSTHOOK: query: -- Join varchar with same length varchar -explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: explain select * from varchar_join1_vc1_orc a join varchar_join1_vc1_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage @@ -218,11 +216,9 @@ POSTHOOK: Input: default@varchar_join1_vc1_orc 1 abc 1 abc 2 abc 2 abc 3 abc 3 abc -PREHOOK: query: -- Join varchar with different length varchar -explain select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: explain select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY -POSTHOOK: query: -- Join varchar with different length varchar -explain 
select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: explain select * from varchar_join1_vc1_orc a join varchar_join1_vc2_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage @@ -314,11 +310,9 @@ POSTHOOK: Input: default@varchar_join1_vc2_orc 1 abc 1 abc 2 abc 2 abc 3 abc 3 abc -PREHOOK: query: -- Join varchar with string -explain select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1 +PREHOOK: query: explain select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1 PREHOOK: type: QUERY -POSTHOOK: query: -- Join varchar with string -explain select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1 +POSTHOOK: query: explain select * from varchar_join1_vc1_orc a join varchar_join1_str_orc b on (a.c2 = b.c2) order by a.c1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-5 is a root stage diff --git a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out index 49d8b34..b2f5b0c 100644 --- a/ql/src/test/results/clientpositive/vector_varchar_simple.q.out +++ b/ql/src/test/results/clientpositive/vector_varchar_simple.q.out @@ -99,16 +99,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- should match the query from src -select key, value +PREHOOK: query: select key, value from varchar_2 order by key asc limit 5 PREHOOK: type: QUERY PREHOOK: Input: default@varchar_2 #### A masked pattern was here #### -POSTHOOK: query: -- should match the query from src -select key, value +POSTHOOK: query: select key, value from varchar_2 order by key asc limit 5 @@ -193,16 +191,14 @@ STAGE PLANS: Processor Tree: ListSink -PREHOOK: query: -- should match the query from src -select key, value +PREHOOK: query: select key, value from varchar_2 order by key desc limit 5 PREHOOK: 
type: QUERY PREHOOK: Input: default@varchar_2 #### A masked pattern was here #### -POSTHOOK: query: -- should match the query from src -select key, value +POSTHOOK: query: select key, value from varchar_2 order by key desc limit 5 @@ -222,15 +218,13 @@ POSTHOOK: query: drop table varchar_2 POSTHOOK: type: DROPTABLE POSTHOOK: Input: default@varchar_2 POSTHOOK: Output: default@varchar_2 -PREHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez. -create table varchar_3 ( +PREHOOK: query: create table varchar_3 ( field varchar(25) ) stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@varchar_3 -POSTHOOK: query: -- Implicit conversion. Occurs in reduce-side under Tez. -create table varchar_3 ( +POSTHOOK: query: create table varchar_3 ( field varchar(25) ) stored as orc POSTHOOK: type: CREATETABLE diff --git a/ql/src/test/results/clientpositive/vector_when_case_null.q.out b/ql/src/test/results/clientpositive/vector_when_case_null.q.out index fb56ff9..93f88a6 100644 --- a/ql/src/test/results/clientpositive/vector_when_case_null.q.out +++ b/ql/src/test/results/clientpositive/vector_when_case_null.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -create table count_case_groupby (key string, bool boolean) STORED AS orc +PREHOOK: query: create table count_case_groupby (key string, bool boolean) STORED AS orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@count_case_groupby -POSTHOOK: query: -- SORT_QUERY_RESULTS - -create table count_case_groupby (key string, bool boolean) STORED AS orc +POSTHOOK: query: create table count_case_groupby (key string, bool boolean) STORED AS orc POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@count_case_groupby diff --git a/ql/src/test/results/clientpositive/vectorization_1.q.out b/ql/src/test/results/clientpositive/vectorization_1.q.out index 1e4c00c..e0a4344 100644 --- 
a/ql/src/test/results/clientpositive/vectorization_1.q.out +++ b/ql/src/test/results/clientpositive/vectorization_1.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT VAR_POP(ctinyint), +PREHOOK: query: SELECT VAR_POP(ctinyint), (VAR_POP(ctinyint) / -26.28), SUM(cfloat), (-1.389 + SUM(cfloat)), @@ -22,9 +20,7 @@ WHERE (((cdouble > ctinyint) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT VAR_POP(ctinyint), +POSTHOOK: query: SELECT VAR_POP(ctinyint), (VAR_POP(ctinyint) / -26.28), SUM(cfloat), (-1.389 + SUM(cfloat)), diff --git a/ql/src/test/results/clientpositive/vectorization_10.q.out b/ql/src/test/results/clientpositive/vectorization_10.q.out index ba281f7..9dad4c4 100644 --- a/ql/src/test/results/clientpositive/vectorization_10.q.out +++ b/ql/src/test/results/clientpositive/vectorization_10.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cdouble, +PREHOOK: query: SELECT cdouble, ctimestamp1, ctinyint, cboolean1, @@ -25,9 +23,7 @@ WHERE (((cstring2 <= '10') PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cdouble, +POSTHOOK: query: SELECT cdouble, ctimestamp1, ctinyint, cboolean1, diff --git a/ql/src/test/results/clientpositive/vectorization_11.q.out b/ql/src/test/results/clientpositive/vectorization_11.q.out index 0fe4c48..dff58da 100644 --- a/ql/src/test/results/clientpositive/vectorization_11.q.out +++ b/ql/src/test/results/clientpositive/vectorization_11.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cstring1, +PREHOOK: query: SELECT cstring1, cboolean1, cdouble, ctimestamp1, @@ -16,9 +14,7 @@ WHERE ((cstring2 = cstring1) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cstring1, +POSTHOOK: query: SELECT cstring1, 
cboolean1, cdouble, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/vectorization_12.q.out b/ql/src/test/results/clientpositive/vectorization_12.q.out index a199fe8..6a7f69c 100644 --- a/ql/src/test/results/clientpositive/vectorization_12.q.out +++ b/ql/src/test/results/clientpositive/vectorization_12.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cbigint, +PREHOOK: query: SELECT cbigint, cboolean1, cstring1, ctimestamp1, @@ -33,9 +31,7 @@ ORDER BY ctimestamp1, cdouble, cbigint, cstring1 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cbigint, +POSTHOOK: query: SELECT cbigint, cboolean1, cstring1, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out index e82e474..99c99d7 100644 --- a/ql/src/test/results/clientpositive/vectorization_13.q.out +++ b/ql/src/test/results/clientpositive/vectorization_13.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cboolean1, ctinyint, ctimestamp1, @@ -33,9 +31,7 @@ GROUP BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1 ORDER BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16 LIMIT 40 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cboolean1, ctinyint, ctimestamp1, @@ -257,8 +253,7 @@ NULL -63 1969-12-31 16:00:15.436 -63.0 NULL 63 -63 0 -63.0 -0.0 63.0 -5011.839 0 NULL -64 1969-12-31 16:00:11.912 -64.0 NULL 64 -64 0 -64.0 -0.0 64.0 -5091.392 0.0 64.0 0.0 -10.175 -64.0 0.410625 -64.0 0.0 -64 NULL -64 1969-12-31 16:00:12.339 -64.0 NULL 64 -64 0 -64.0 -0.0 64.0 -5091.392 0.0 64.0 0.0 -10.175 -64.0 0.410625 -64.0 0.0 -64 NULL -64 1969-12-31 16:00:13.274 -64.0 NULL 64 -64 0 -64.0 -0.0 64.0 -5091.392 0.0 64.0 0.0 -10.175 -64.0 
0.410625 -64.0 0.0 -64 -PREHOOK: query: -- double compare timestamp -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cboolean1, ctinyint, ctimestamp1, @@ -291,8 +286,7 @@ GROUP BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1 ORDER BY cboolean1, ctinyint, ctimestamp1, cfloat, cstring1, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12, c13, c14, c15, c16 LIMIT 40 PREHOOK: type: QUERY -POSTHOOK: query: -- double compare timestamp -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cboolean1, ctinyint, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/vectorization_14.q.out b/ql/src/test/results/clientpositive/vectorization_14.q.out index 34923d6..ef44f65 100644 --- a/ql/src/test/results/clientpositive/vectorization_14.q.out +++ b/ql/src/test/results/clientpositive/vectorization_14.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT ctimestamp1, cfloat, cstring1, @@ -33,9 +31,7 @@ WHERE (((ctinyint <= cbigint) GROUP BY ctimestamp1, cfloat, cstring1, cboolean1, cdouble ORDER BY cstring1, cfloat, cdouble, ctimestamp1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT ctimestamp1, cfloat, cstring1, diff --git a/ql/src/test/results/clientpositive/vectorization_15.q.out b/ql/src/test/results/clientpositive/vectorization_15.q.out index a2edab4..5de2092 100644 --- a/ql/src/test/results/clientpositive/vectorization_15.q.out +++ b/ql/src/test/results/clientpositive/vectorization_15.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cfloat, cboolean1, cdouble, @@ -31,9 +29,7 @@ WHERE (((cstring2 LIKE '%ss%') GROUP BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1 ORDER BY cfloat, cboolean1, cdouble, cstring1, ctinyint, cint, ctimestamp1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cfloat, cboolean1, cdouble, diff --git 
a/ql/src/test/results/clientpositive/vectorization_16.q.out b/ql/src/test/results/clientpositive/vectorization_16.q.out index ece5154..d93d810 100644 --- a/ql/src/test/results/clientpositive/vectorization_16.q.out +++ b/ql/src/test/results/clientpositive/vectorization_16.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cstring1, cdouble, ctimestamp1, @@ -20,9 +18,7 @@ WHERE ((cstring2 LIKE '%b%') OR (cstring1 < 'a'))) GROUP BY cstring1, cdouble, ctimestamp1 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cstring1, cdouble, ctimestamp1, diff --git a/ql/src/test/results/clientpositive/vectorization_17.q.out b/ql/src/test/results/clientpositive/vectorization_17.q.out index e01bea2..9014fe4 100644 --- a/ql/src/test/results/clientpositive/vectorization_17.q.out +++ b/ql/src/test/results/clientpositive/vectorization_17.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +PREHOOK: query: EXPLAIN SELECT cfloat, cstring1, cint, @@ -24,9 +22,7 @@ WHERE (((cbigint > -23) OR (cfloat = cdouble)))) ORDER BY cbigint, cfloat PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - -EXPLAIN +POSTHOOK: query: EXPLAIN SELECT cfloat, cstring1, cint, diff --git a/ql/src/test/results/clientpositive/vectorization_6.q.out b/ql/src/test/results/clientpositive/vectorization_6.q.out index 2af0885..13897f6 100644 --- a/ql/src/test/results/clientpositive/vectorization_6.q.out +++ b/ql/src/test/results/clientpositive/vectorization_6.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cboolean1, +PREHOOK: query: SELECT cboolean1, cfloat, cstring1, (988888 * csmallint), @@ -22,9 +20,7 @@ WHERE ((ctinyint != 0) PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### -POSTHOOK: query: -- SORT_QUERY_RESULTS - -SELECT cboolean1, +POSTHOOK: query: SELECT cboolean1, cfloat, cstring1, (988888 * csmallint), diff 
--git a/ql/src/test/results/clientpositive/vectorized_casts.q.out b/ql/src/test/results/clientpositive/vectorized_casts.q.out index e39bff4..b1cd765 100644 --- a/ql/src/test/results/clientpositive/vectorized_casts.q.out +++ b/ql/src/test/results/clientpositive/vectorized_casts.q.out @@ -1,11 +1,6 @@ -PREHOOK: query: -- SORT_QUERY_RESULTS - --- Currently, vectorization is not supported in fetch task (hive.fetch.task.conversion=none) --- Test type casting in vectorized mode to verify end-to-end functionality. - -explain +PREHOOK: query: explain select --- to boolean + cast (ctinyint as boolean) ,cast (csmallint as boolean) ,cast (cint as boolean) @@ -16,7 +11,7 @@ select ,cast (cbigint * 0 as boolean) ,cast (ctimestamp1 as boolean) ,cast (cstring1 as boolean) --- to int family + ,cast (ctinyint as int) ,cast (csmallint as int) ,cast (cint as int) @@ -30,7 +25,7 @@ select ,cast (cfloat as tinyint) ,cast (cfloat as smallint) ,cast (cfloat as bigint) --- to float family + ,cast (ctinyint as double) ,cast (csmallint as double) ,cast (cint as double) @@ -43,7 +38,7 @@ select ,cast (substr(cstring1, 1, 1) as double) ,cast (cint as float) ,cast (cdouble as float) --- to timestamp + ,cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -55,7 +50,7 @@ select ,cast (ctimestamp1 as timestamp) ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) --- to string + ,cast (ctinyint as string) ,cast (csmallint as string) ,cast (cint as string) @@ -68,23 +63,18 @@ select ,cast (cstring1 as string) ,cast (cast (cstring1 as char(10)) as string) ,cast (cast (cstring1 as varchar(10)) as string) --- nested and expression arguments + ,cast (cast (cfloat as int) as float) ,cast (cint * 2 as double) ,cast (sin(cfloat) as string) ,cast (cint as float) + cast(cboolean1 as double) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY -POSTHOOK: query: -- SORT_QUERY_RESULTS - 
--- Currently, vectorization is not supported in fetch task (hive.fetch.task.conversion=none) --- Test type casting in vectorized mode to verify end-to-end functionality. - -explain +POSTHOOK: query: explain select --- to boolean + cast (ctinyint as boolean) ,cast (csmallint as boolean) ,cast (cint as boolean) @@ -95,7 +85,7 @@ select ,cast (cbigint * 0 as boolean) ,cast (ctimestamp1 as boolean) ,cast (cstring1 as boolean) --- to int family + ,cast (ctinyint as int) ,cast (csmallint as int) ,cast (cint as int) @@ -109,7 +99,7 @@ select ,cast (cfloat as tinyint) ,cast (cfloat as smallint) ,cast (cfloat as bigint) --- to float family + ,cast (ctinyint as double) ,cast (csmallint as double) ,cast (cint as double) @@ -122,7 +112,7 @@ select ,cast (substr(cstring1, 1, 1) as double) ,cast (cint as float) ,cast (cdouble as float) --- to timestamp + ,cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -134,7 +124,7 @@ select ,cast (ctimestamp1 as timestamp) ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) --- to string + ,cast (ctinyint as string) ,cast (csmallint as string) ,cast (cint as string) @@ -147,13 +137,13 @@ select ,cast (cstring1 as string) ,cast (cast (cstring1 as char(10)) as string) ,cast (cast (cstring1 as varchar(10)) as string) --- nested and expression arguments + ,cast (cast (cfloat as int) as float) ,cast (cint * 2 as double) ,cast (sin(cfloat) as string) ,cast (cint as float) + cast(cboolean1 as double) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -189,7 +179,7 @@ STAGE PLANS: ListSink PREHOOK: query: select --- to boolean + cast (ctinyint as boolean) ,cast (csmallint as boolean) ,cast (cint as boolean) @@ -200,7 +190,7 @@ PREHOOK: query: select ,cast (cbigint * 0 as boolean) ,cast (ctimestamp1 as boolean) ,cast (cstring1 as boolean) --- to int family + ,cast (ctinyint as int) ,cast (csmallint 
as int) ,cast (cint as int) @@ -214,7 +204,7 @@ PREHOOK: query: select ,cast (cfloat as tinyint) ,cast (cfloat as smallint) ,cast (cfloat as bigint) --- to float family + ,cast (ctinyint as double) ,cast (csmallint as double) ,cast (cint as double) @@ -227,7 +217,7 @@ PREHOOK: query: select ,cast (substr(cstring1, 1, 1) as double) ,cast (cint as float) ,cast (cdouble as float) --- to timestamp + ,cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -239,7 +229,7 @@ PREHOOK: query: select ,cast (ctimestamp1 as timestamp) ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) --- to string + ,cast (ctinyint as string) ,cast (csmallint as string) ,cast (cint as string) @@ -252,19 +242,19 @@ PREHOOK: query: select ,cast (cstring1 as string) ,cast (cast (cstring1 as char(10)) as string) ,cast (cast (cstring1 as varchar(10)) as string) --- nested and expression arguments + ,cast (cast (cfloat as int) as float) ,cast (cint * 2 as double) ,cast (sin(cfloat) as string) ,cast (cint as float) + cast(cboolean1 as double) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### POSTHOOK: query: select --- to boolean + cast (ctinyint as boolean) ,cast (csmallint as boolean) ,cast (cint as boolean) @@ -275,7 +265,7 @@ POSTHOOK: query: select ,cast (cbigint * 0 as boolean) ,cast (ctimestamp1 as boolean) ,cast (cstring1 as boolean) --- to int family + ,cast (ctinyint as int) ,cast (csmallint as int) ,cast (cint as int) @@ -289,7 +279,7 @@ POSTHOOK: query: select ,cast (cfloat as tinyint) ,cast (cfloat as smallint) ,cast (cfloat as bigint) --- to float family + ,cast (ctinyint as double) ,cast (csmallint as double) ,cast (cint as double) @@ -302,7 +292,7 @@ POSTHOOK: query: select ,cast (substr(cstring1, 1, 1) as double) ,cast (cint as float) ,cast (cdouble as float) --- to timestamp + ,cast 
(ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -314,7 +304,7 @@ POSTHOOK: query: select ,cast (ctimestamp1 as timestamp) ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) --- to string + ,cast (ctinyint as string) ,cast (csmallint as string) ,cast (cint as string) @@ -327,13 +317,13 @@ POSTHOOK: query: select ,cast (cstring1 as string) ,cast (cast (cstring1 as char(10)) as string) ,cast (cast (cstring1 as varchar(10)) as string) --- nested and expression arguments + ,cast (cast (cfloat as int) as float) ,cast (cint * 2 as double) ,cast (sin(cfloat) as string) ,cast (cint as float) + cast(cboolean1 as double) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc diff --git a/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out b/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out index ca938b0..7b34452 100644 --- a/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out +++ b/ql/src/test/results/clientpositive/vectorized_string_funcs.q.out @@ -1,6 +1,4 @@ -PREHOOK: query: -- Test string functions in vectorized mode to verify end-to-end functionality. - -explain +PREHOOK: query: explain select substr(cstring1, 1, 2) ,substr(cstring1, 2) @@ -16,15 +14,13 @@ select ,concat(cstring1, '<') ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) from alltypesorc --- Limit the number of rows of output to a reasonable amount. + where cbigint % 237 = 0 --- Test function use in the WHERE clause. + and length(substr(cstring1, 1, 2)) <= 2 and cstring1 like '%' PREHOOK: type: QUERY -POSTHOOK: query: -- Test string functions in vectorized mode to verify end-to-end functionality. 
- -explain +POSTHOOK: query: explain select substr(cstring1, 1, 2) ,substr(cstring1, 2) @@ -40,9 +36,9 @@ select ,concat(cstring1, '<') ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) from alltypesorc --- Limit the number of rows of output to a reasonable amount. + where cbigint % 237 = 0 --- Test function use in the WHERE clause. + and length(substr(cstring1, 1, 2)) <= 2 and cstring1 like '%' POSTHOOK: type: QUERY @@ -94,9 +90,9 @@ PREHOOK: query: select ,concat(cstring1, '<') ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) from alltypesorc --- Limit the number of rows of output to a reasonable amount. + where cbigint % 237 = 0 --- Test function use in the WHERE clause. + and length(substr(cstring1, 1, 2)) <= 2 and cstring1 like '%' PREHOOK: type: QUERY @@ -117,9 +113,9 @@ POSTHOOK: query: select ,concat(cstring1, '<') ,concat(substr(cstring1, 1, 2), substr(cstring2, 1, 2)) from alltypesorc --- Limit the number of rows of output to a reasonable amount. + where cbigint % 237 = 0 --- Test function use in the WHERE clause. 
+ and length(substr(cstring1, 1, 2)) <= 2 and cstring1 like '%' POSTHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out b/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out index c7f2a74..bc5ceb3 100644 --- a/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out +++ b/ql/src/test/results/clientpositive/vectorized_timestamp_ints_casts.q.out @@ -1,6 +1,6 @@ PREHOOK: query: explain select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -13,12 +13,12 @@ select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY POSTHOOK: query: explain select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -31,7 +31,7 @@ select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -67,7 +67,7 @@ STAGE PLANS: ListSink PREHOOK: query: select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -80,13 +80,13 @@ PREHOOK: query: select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### POSTHOOK: query: select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -99,7 +99,7 @@ POSTHOOK: query: select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 
POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc @@ -132,7 +132,7 @@ POSTHOOK: Input: default@alltypesorc 1969-12-31 16:00:00.011 NULL 1969-12-27 18:49:09.583 1970-01-14 22:35:27 1969-12-31 16:00:11 NULL 1969-12-31 16:00:00.001 1969-12-31 16:00:00 1969-12-31 16:00:02.351 NULL NULL PREHOOK: query: explain select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -145,12 +145,12 @@ select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY POSTHOOK: query: explain select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -163,7 +163,7 @@ select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY STAGE DEPENDENCIES: @@ -199,7 +199,7 @@ STAGE PLANS: ListSink PREHOOK: query: select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -212,13 +212,13 @@ PREHOOK: query: select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 PREHOOK: type: QUERY PREHOOK: Input: default@alltypesorc #### A masked pattern was here #### POSTHOOK: query: select --- to timestamp + cast (ctinyint as timestamp) ,cast (csmallint as timestamp) ,cast (cint as timestamp) @@ -231,7 +231,7 @@ POSTHOOK: query: select ,cast (cstring1 as timestamp) ,cast (substr(cstring1, 1, 1) as timestamp) from alltypesorc --- limit output to a reasonably small number of rows + where cbigint % 250 = 0 POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc diff --git a/ql/src/test/results/clientpositive/view_authorization_sqlstd.q.out 
b/ql/src/test/results/clientpositive/view_authorization_sqlstd.q.out index 122ca4e..3832fcf 100644 --- a/ql/src/test/results/clientpositive/view_authorization_sqlstd.q.out +++ b/ql/src/test/results/clientpositive/view_authorization_sqlstd.q.out @@ -1,12 +1,8 @@ -PREHOOK: query: -- Test view authorization , and 'show grant' variants - -create table t1(i int, j int, k int) +PREHOOK: query: create table t1(i int, j int, k int) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default PREHOOK: Output: default@t1 -POSTHOOK: query: -- Test view authorization , and 'show grant' variants - -create table t1(i int, j int, k int) +POSTHOOK: query: create table t1(i int, j int, k int) POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 @@ -24,26 +20,22 @@ default t1 user1 USER DELETE true -1 user1 default t1 user1 USER INSERT true -1 user1 default t1 user1 USER SELECT true -1 user1 default t1 user1 USER UPDATE true -1 user1 -PREHOOK: query: -- protecting certain columns -create view vt1 as select i,k from t1 +PREHOOK: query: create view vt1 as select i,k from t1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t1 PREHOOK: Output: database:default PREHOOK: Output: default@vt1 -POSTHOOK: query: -- protecting certain columns -create view vt1 as select i,k from t1 +POSTHOOK: query: create view vt1 as select i,k from t1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t1 POSTHOOK: Output: database:default POSTHOOK: Output: default@vt1 -PREHOOK: query: -- protecting certain rows -create view vt2 as select * from t1 where i > 1 +PREHOOK: query: create view vt2 as select * from t1 where i > 1 PREHOOK: type: CREATEVIEW PREHOOK: Input: default@t1 PREHOOK: Output: database:default PREHOOK: Output: default@vt2 -POSTHOOK: query: -- protecting certain rows -create view vt2 as select * from t1 where i > 1 +POSTHOOK: query: create view vt2 as select * from t1 where i > 1 POSTHOOK: type: CREATEVIEW POSTHOOK: Input: default@t1 POSTHOOK: Output: 
database:default @@ -64,16 +56,10 @@ default vt2 user1 USER DELETE true -1 user1 default vt2 user1 USER INSERT true -1 user1 default vt2 user1 USER SELECT true -1 user1 default vt2 user1 USER UPDATE true -1 user1 -PREHOOK: query: --view grant to user --- try with and without table keyword - -grant select on vt1 to user user2 +PREHOOK: query: grant select on vt1 to user user2 PREHOOK: type: GRANT_PRIVILEGE PREHOOK: Output: default@vt1 -POSTHOOK: query: --view grant to user --- try with and without table keyword - -grant select on vt1 to user user2 +POSTHOOK: query: grant select on vt1 to user user2 POSTHOOK: type: GRANT_PRIVILEGE POSTHOOK: Output: default@vt1 PREHOOK: query: grant insert on table vt1 to user user3 @@ -125,16 +111,12 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@vt1 #### A masked pattern was here #### -PREHOOK: query: -- verify input objects required does not include table --- even if view is within a sub query -select * from (select * from vt1) a +PREHOOK: query: select * from (select * from vt1) a PREHOOK: type: QUERY PREHOOK: Input: default@t1 PREHOOK: Input: default@vt1 #### A masked pattern was here #### -POSTHOOK: query: -- verify input objects required does not include table --- even if view is within a sub query -select * from (select * from vt1) a +POSTHOOK: query: select * from (select * from vt1) a POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@vt1 @@ -218,11 +200,9 @@ default vt3 user2 USER DELETE true -1 user1 default vt3 user2 USER INSERT true -1 user1 default vt3 user2 USER SELECT true -1 user1 default vt3 user2 USER UPDATE true -1 user1 -PREHOOK: query: -- grant privileges on roles for view, after next statement -show grant user user3 on table vt1 +PREHOOK: query: show grant user user3 on table vt1 PREHOOK: type: SHOW_GRANT -POSTHOOK: query: -- grant privileges on roles for view, after next statement -show grant user user3 on table vt1 +POSTHOOK: query: show grant user user3 
on table vt1 POSTHOOK: type: SHOW_GRANT default vt1 user3 USER INSERT false -1 user1 PREHOOK: query: show current roles